Release 4.11 net/netfilter/ipvs/ip_vs_proto.c
/*
* ip_vs_proto.c: transport protocol load balancing support for IPVS
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
* Julian Anastasov <ja@ssi.bg>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <net/ip_vs.h>
/*
* IPVS protocols can only be registered/unregistered when the ipvs
* module is loaded/unloaded, so no lock is needed in accessing the
* ipvs protocol table.
*/
#define IP_VS_PROTO_TAB_SIZE 32
/* must be power of 2 */
#define IP_VS_PROTO_HASH(proto) ((proto) & (IP_VS_PROTO_TAB_SIZE-1))
static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
/*
* register an ipvs protocol
*/
/*
 * Add a transport protocol handler to the global hash table.  Runs only
 * at module init time, so the table needs no locking (see note above).
 */
static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
{
	unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);

	/* Push onto the head of the hash chain. */
	pp->next = ip_vs_proto_table[hash];
	ip_vs_proto_table[hash] = pp;

	if (pp->init)
		pp->init(pp);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 54 | 93.10% | 1 | 20.00% |
Sven Wegener | 1 | 1.72% | 1 | 20.00% |
Eric Dumazet | 1 | 1.72% | 1 | 20.00% |
Pavel Emelyanov | 1 | 1.72% | 1 | 20.00% |
Adrian Bunk | 1 | 1.72% | 1 | 20.00% |
Total | 58 | 100.00% | 5 | 100.00% |
/*
* register an ipvs protocols netns related data
*/
static int
register_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_protocol *pp)
{
unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
/* Zeroed allocation: all unset fields start as NULL/0 */
struct ip_vs_proto_data *pd =
kzalloc(sizeof(struct ip_vs_proto_data), GFP_KERNEL);
if (!pd)
return -ENOMEM;
pd->pp = pp; /* cache protocol ops so lookups avoid the global table */
/* Insert at the head of this netns' hash chain */
pd->next = ipvs->proto_data_table[hash];
ipvs->proto_data_table[hash] = pd;
atomic_set(&pd->appcnt, 0); /* Init app counter */
if (pp->init_netns != NULL) {
int ret = pp->init_netns(ipvs, pd);
if (ret) {
/* init failed: unlink and free the proto data */
ipvs->proto_data_table[hash] = pd->next;
kfree(pd);
return ret;
}
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hans Schillstrom | 136 | 96.45% | 2 | 40.00% |
Eric W. Biedermann | 3 | 2.13% | 1 | 20.00% |
Eric Dumazet | 1 | 0.71% | 1 | 20.00% |
Sasha Levin | 1 | 0.71% | 1 | 20.00% |
Total | 141 | 100.00% | 5 | 100.00% |
/*
* unregister an ipvs protocol
*/
/*
 * Remove a transport protocol handler from the global hash table and run
 * its ->exit hook.  Returns -ESRCH if @pp was never registered.
 */
static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
{
	unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
	struct ip_vs_protocol **link;

	for (link = &ip_vs_proto_table[hash]; *link; link = &(*link)->next) {
		if (*link != pp)
			continue;
		/* Unlink from the chain, then let the protocol clean up. */
		*link = pp->next;
		if (pp->exit != NULL)
			pp->exit(pp);
		return 0;
	}
	return -ESRCH;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 90 | 97.83% | 1 | 33.33% |
Eric Dumazet | 1 | 1.09% | 1 | 33.33% |
Adrian Bunk | 1 | 1.09% | 1 | 33.33% |
Total | 92 | 100.00% | 3 | 100.00% |
/*
* unregister an ipvs protocols netns data
*/
/*
 * Remove and free one protocol's per-netns data, running its
 * ->exit_netns hook first.  Returns -ESRCH if @pd is not in the table.
 */
static int
unregister_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
{
	unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol);
	struct ip_vs_proto_data **link;

	for (link = &ipvs->proto_data_table[hash]; *link; link = &(*link)->next) {
		if (*link != pd)
			continue;
		/* Unlink first, then run the per-netns exit hook and free. */
		*link = pd->next;
		if (pd->pp->exit_netns != NULL)
			pd->pp->exit_netns(ipvs, pd);
		kfree(pd);
		return 0;
	}
	return -ESRCH;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hans Schillstrom | 108 | 96.43% | 1 | 33.33% |
Eric W. Biedermann | 3 | 2.68% | 1 | 33.33% |
Eric Dumazet | 1 | 0.89% | 1 | 33.33% |
Total | 112 | 100.00% | 3 | 100.00% |
/*
* get ip_vs_protocol object by its proto.
*/
/*
 * Look up the handler registered for a transport protocol number.
 * Returns NULL when no handler is registered for @proto.
 */
struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
{
	struct ip_vs_protocol *pp = ip_vs_proto_table[IP_VS_PROTO_HASH(proto)];

	/* Walk the hash chain until the protocol number matches. */
	while (pp && pp->protocol != proto)
		pp = pp->next;

	return pp;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 57 | 98.28% | 1 | 50.00% |
Eric Dumazet | 1 | 1.72% | 1 | 50.00% |
Total | 58 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(ip_vs_proto_get);
/*
* get ip_vs_protocol object data by netns and proto
*/
/*
 * Find this netns' per-protocol data for a transport protocol number.
 * Returns NULL when @ipvs has no data registered for @proto.
 */
struct ip_vs_proto_data *
ip_vs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
{
	struct ip_vs_proto_data *pd = ipvs->proto_data_table[IP_VS_PROTO_HASH(proto)];

	/* Walk the per-netns hash chain until the protocol matches. */
	while (pd && pd->pp->protocol != proto)
		pd = pd->next;

	return pd;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hans Schillstrom | 65 | 97.01% | 2 | 50.00% |
Eric Dumazet | 1 | 1.49% | 1 | 25.00% |
Eric W. Biedermann | 1 | 1.49% | 1 | 25.00% |
Total | 67 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(ip_vs_proto_data_get);
/*
* Propagate event for state change to all protocols
*/
/*
 * Notify every registered protocol in this netns that the timeout
 * configuration changed, via its optional ->timeout_change hook.
 */
void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags)
{
	int i;

	for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
		struct ip_vs_proto_data *pd;

		for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next)
			if (pd->pp->timeout_change)
				pd->pp->timeout_change(pd, flags);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 57 | 75.00% | 1 | 50.00% |
Hans Schillstrom | 19 | 25.00% | 1 | 50.00% |
Total | 76 | 100.00% | 2 | 100.00% |
/*
 * Allocate a private copy of a timeout table (@size bytes) so each user
 * can tune its values independently.  Returns NULL on allocation
 * failure; the caller owns the returned memory and must kfree() it.
 */
int *
ip_vs_create_timeout_table(int *table, int size)
{
return kmemdup(table, size, GFP_KERNEL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 20 | 86.96% | 1 | 33.33% |
Arnaldo Carvalho de Melo | 2 | 8.70% | 1 | 33.33% |
Julian Anastasov | 1 | 4.35% | 1 | 33.33% |
Total | 23 | 100.00% | 3 | 100.00% |
/*
* Set timeout value for state specified by name
*/
/*
 * Set the timeout (in seconds, stored as jiffies) for the state whose
 * name matches @name in the @names[] table.  Returns 0 on success,
 * -EINVAL for bad arguments, -ENOENT when no state name matches.
 */
int
ip_vs_set_state_timeout(int *table, int num, const char *const *names,
			const char *name, int to)
{
	int i;

	if (!table || !name || !to)
		return -EINVAL;

	for (i = 0; i < num; i++) {
		if (strcmp(names[i], name) == 0) {
			table[i] = to * HZ;	/* seconds -> jiffies */
			return 0;
		}
	}
	return -ENOENT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 86 | 96.63% | 1 | 50.00% |
Jan Engelhardt | 3 | 3.37% | 1 | 50.00% |
Total | 89 | 100.00% | 2 | 100.00% |
/*
 * Map a protocol state number to its printable name.  Falls back to
 * "NONE" for IPPROTO_IP and "ERR!" for protocols with no state names.
 */
const char * ip_vs_state_name(__u16 proto, int state)
{
	struct ip_vs_protocol *pp = ip_vs_proto_get(proto);

	if (pp && pp->state_name)
		return pp->state_name(state);

	return (IPPROTO_IP == proto) ? "NONE" : "ERR!";
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 46 | 85.19% | 1 | 50.00% |
Julian Anastasov | 8 | 14.81% | 1 | 50.00% |
Total | 54 | 100.00% | 2 | 100.00% |
/*
 * Log a one-line description of the IPv4 packet at @offset within @skb:
 * "saddr:sport->daddr:dport", or a shorter form when the header is
 * truncated or the packet is a non-first fragment (ports unavailable).
 */
static void
ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp,
const struct sk_buff *skb,
int offset,
const char *msg)
{
char buf[128];
struct iphdr _iph, *ih;
/* May copy the header into _iph when the skb data is non-linear */
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
sprintf(buf, "TRUNCATED");
else if (ih->frag_off & htons(IP_OFFSET))
/* Non-first fragment: the L4 header (ports) is not present */
sprintf(buf, "%pI4->%pI4 frag", &ih->saddr, &ih->daddr);
else {
__be16 _ports[2], *pptr;
/* Ports follow the IP header; ihl is in 32-bit words */
pptr = skb_header_pointer(skb, offset + ih->ihl*4,
sizeof(_ports), _ports);
if (pptr == NULL)
sprintf(buf, "TRUNCATED %pI4->%pI4",
&ih->saddr, &ih->daddr);
else
sprintf(buf, "%pI4:%u->%pI4:%u",
&ih->saddr, ntohs(pptr[0]),
&ih->daddr, ntohs(pptr[1]));
}
pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 73 | 36.32% | 1 | 10.00% |
David S. Miller | 58 | 28.86% | 1 | 10.00% |
Julian Anastasov | 50 | 24.88% | 1 | 10.00% |
Patrick McHardy | 9 | 4.48% | 1 | 10.00% |
Harvey Harrison | 6 | 2.99% | 1 | 10.00% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.50% | 1 | 10.00% |
Al Viro | 1 | 0.50% | 1 | 10.00% |
Sven Wegener | 1 | 0.50% | 1 | 10.00% |
Hannes Eder | 1 | 0.50% | 1 | 10.00% |
Julius Volz | 1 | 0.50% | 1 | 10.00% |
Total | 201 | 100.00% | 10 | 100.00% |
#ifdef CONFIG_IP_VS_IPV6
/*
 * IPv6 counterpart of ip_vs_tcpudp_debug_packet_v4(): log a one-line
 * description of the packet at @offset within @skb.
 */
static void
ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp,
const struct sk_buff *skb,
int offset,
const char *msg)
{
char buf[192];
struct ipv6hdr _iph, *ih;
/* May copy the header into _iph when the skb data is non-linear */
ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
if (ih == NULL)
sprintf(buf, "TRUNCATED");
else if (ih->nexthdr == IPPROTO_FRAGMENT)
/* Fragment header present: don't chase ports in later fragments */
sprintf(buf, "%pI6c->%pI6c frag", &ih->saddr, &ih->daddr);
else {
__be16 _ports[2], *pptr;
/* Ports follow the fixed-size IPv6 header (no extension parsing) */
pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
sizeof(_ports), _ports);
if (pptr == NULL)
sprintf(buf, "TRUNCATED %pI6c->%pI6c",
&ih->saddr, &ih->daddr);
else
sprintf(buf, "%pI6c:%u->%pI6c:%u",
&ih->saddr, ntohs(pptr[0]),
&ih->daddr, ntohs(pptr[1]));
}
pr_debug("%s: %s %s\n", msg, pp->name, buf);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julius Volz | 181 | 91.41% | 1 | 16.67% |
Harvey Harrison | 6 | 3.03% | 1 | 16.67% |
Patrick McHardy | 6 | 3.03% | 1 | 16.67% |
Jesper Dangaard Brouer | 3 | 1.52% | 1 | 16.67% |
Hannes Eder | 1 | 0.51% | 1 | 16.67% |
Sven Wegener | 1 | 0.51% | 1 | 16.67% |
Total | 198 | 100.00% | 6 | 100.00% |
#endif
/*
 * Address-family dispatcher for the TCP/UDP packet debug helpers.
 * Without CONFIG_IP_VS_IPV6 every call falls through to the v4 variant.
 */
void
ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
const struct sk_buff *skb,
int offset,
const char *msg)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg);
else
#endif
ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julius Volz | 56 | 91.80% | 1 | 50.00% |
Julian Anastasov | 5 | 8.20% | 1 | 50.00% |
Total | 61 | 100.00% | 2 | 100.00% |
/*
* per network name-space init
*/
int __net_init ip_vs_protocol_net_init(struct netns_ipvs *ipvs)
{
int i, ret;
/* Every compiled-in protocol that needs per-netns state */
static struct ip_vs_protocol *protos[] = {
#ifdef CONFIG_IP_VS_PROTO_TCP
&ip_vs_protocol_tcp,
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
&ip_vs_protocol_udp,
#endif
#ifdef CONFIG_IP_VS_PROTO_SCTP
&ip_vs_protocol_sctp,
#endif
#ifdef CONFIG_IP_VS_PROTO_AH
&ip_vs_protocol_ah,
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
&ip_vs_protocol_esp,
#endif
};
for (i = 0; i < ARRAY_SIZE(protos); i++) {
ret = register_ip_vs_proto_netns(ipvs, protos[i]);
if (ret < 0)
goto cleanup;
}
return 0;
cleanup:
/* Roll back any registrations that succeeded before the failure */
ip_vs_protocol_net_cleanup(ipvs);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sasha Levin | 66 | 55.93% | 1 | 11.11% |
Hans Schillstrom | 48 | 40.68% | 6 | 66.67% |
Eric W. Biedermann | 4 | 3.39% | 2 | 22.22% |
Total | 118 | 100.00% | 9 | 100.00% |
/*
 * Tear down all per-netns protocol data for @ipvs.
 */
void __net_exit ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs)
{
	int i;

	/* Drain each bucket; unregister always unlinks the current head. */
	for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
		struct ip_vs_proto_data *pd;

		while ((pd = ipvs->proto_data_table[i]) != NULL)
			unregister_ip_vs_proto_netns(ipvs, pd);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hans Schillstrom | 54 | 94.74% | 3 | 60.00% |
Eric W. Biedermann | 3 | 5.26% | 2 | 40.00% |
Total | 57 | 100.00% | 5 | 100.00% |
/*
 * Module init: register every compiled-in protocol handler and print
 * the resulting list.  Each registration appends ", <name>" to
 * protocols[]; printing starts at &protocols[2] to skip the leading
 * separator.
 */
int __init ip_vs_protocol_init(void)
{
char protocols[64];
#define REGISTER_PROTOCOL(p) \
do { \
register_ip_vs_protocol(p); \
strcat(protocols, ", "); \
strcat(protocols, (p)->name); \
} while (0)
protocols[0] = '\0';
/* Keep &protocols[2] a valid empty string if nothing registers */
protocols[2] = '\0';
#ifdef CONFIG_IP_VS_PROTO_TCP
REGISTER_PROTOCOL(&ip_vs_protocol_tcp);
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
REGISTER_PROTOCOL(&ip_vs_protocol_udp);
#endif
#ifdef CONFIG_IP_VS_PROTO_SCTP
REGISTER_PROTOCOL(&ip_vs_protocol_sctp);
#endif
#ifdef CONFIG_IP_VS_PROTO_AH
REGISTER_PROTOCOL(&ip_vs_protocol_ah);
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
REGISTER_PROTOCOL(&ip_vs_protocol_esp);
#endif
pr_info("Registered protocols (%s)\n", &protocols[2]);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 92 | 87.62% | 1 | 25.00% |
Venkata Mohan Reddy | 11 | 10.48% | 1 | 25.00% |
Hannes Eder | 1 | 0.95% | 1 | 25.00% |
Sven Wegener | 1 | 0.95% | 1 | 25.00% |
Total | 105 | 100.00% | 4 | 100.00% |
/*
 * Module exit: unregister every protocol handler still in the table.
 */
void ip_vs_protocol_cleanup(void)
{
	int i;

	/* Pop each chain head until every hash bucket is empty. */
	for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
		struct ip_vs_protocol *pp;

		while ((pp = ip_vs_proto_table[i]) != NULL)
			unregister_ip_vs_protocol(pp);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 49 | 100.00% | 1 | 100.00% |
Total | 49 | 100.00% | 1 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 686 | 41.50% | 1 | 2.50% |
Hans Schillstrom | 439 | 26.56% | 9 | 22.50% |
Julius Volz | 243 | 14.70% | 1 | 2.50% |
Sasha Levin | 67 | 4.05% | 2 | 5.00% |
Julian Anastasov | 64 | 3.87% | 4 | 10.00% |
David S. Miller | 58 | 3.51% | 1 | 2.50% |
Hannes Eder | 19 | 1.15% | 3 | 7.50% |
Patrick McHardy | 15 | 0.91% | 1 | 2.50% |
Eric W. Biedermann | 14 | 0.85% | 3 | 7.50% |
Harvey Harrison | 12 | 0.73% | 2 | 5.00% |
Venkata Mohan Reddy | 11 | 0.67% | 1 | 2.50% |
Eric Dumazet | 6 | 0.36% | 1 | 2.50% |
Sven Wegener | 4 | 0.24% | 2 | 5.00% |
Jesper Dangaard Brouer | 3 | 0.18% | 1 | 2.50% |
Adrian Bunk | 3 | 0.18% | 2 | 5.00% |
Jan Engelhardt | 3 | 0.18% | 1 | 2.50% |
Arnaldo Carvalho de Melo | 2 | 0.12% | 1 | 2.50% |
Pavel Emelyanov | 1 | 0.06% | 1 | 2.50% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.06% | 1 | 2.50% |
Al Viro | 1 | 0.06% | 1 | 2.50% |
Tejun Heo | 1 | 0.06% | 1 | 2.50% |
Total | 1653 | 100.00% | 40 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.