Release 4.7 net/netfilter/ipvs/ip_vs_sh.c
/*
 * IPVS: Source Hashing scheduling module
 *
 * Authors: Wensong Zhang <wensong@gnuchina.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 *
 */
/*
 * The sh algorithm selects a server by hashing the source IP address
 * of the packet.  In pseudo code:
 *
 *       n <- servernode[src_ip];
 *       if (n is dead) or (n is overloaded) or (n.weight <= 0) then
 *               return NULL;
 *
 *       return n;
 *
 * Note that servernode is a hash table of IP_VS_SH_TAB_SIZE buckets
 * (256 with the default CONFIG_IP_VS_SH_TAB_BITS of 8) that maps the
 * hash index derived from the packet source IP address to the current
 * server array.  If the sh scheduler is used in a cache cluster, it is
 * good to combine it with the cache_bypass feature: when the statically
 * assigned server is dead or overloaded, the load balancer can bypass
 * the cache server and send requests to the original server directly.
 *
 * The weight attribute of a destination can be used to control the
 * distribution of connections to the destinations in servernode: the
 * greater the weight, the more connections the destination will receive.
 *
 */
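To make the mapping above concrete, here is a minimal user-space sketch of the servernode lookup (illustrative only, not part of this kernel module; the demo_* names are hypothetical): a 256-entry table maps the hashed source address to a server, and the lookup yields NULL when that server is missing, overloaded, or has non-positive weight.

/* Illustrative user-space sketch of the servernode lookup; not kernel code.
 * The demo_* names are hypothetical and exist only for this example.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_TAB_SIZE 256

struct demo_server {
        int weight;
        bool dead;
        bool overloaded;
};

static struct demo_server *servernode[DEMO_TAB_SIZE];

/* Same multiplicative hash as ip_vs_sh_hashkey(), with port and offset 0. */
static unsigned int demo_bucket(uint32_t src_ip)
{
        return (unsigned int)(src_ip * 2654435761UL) & (DEMO_TAB_SIZE - 1);
}

/* n <- servernode[src_ip]; NULL if n is dead, overloaded or weight <= 0 */
static struct demo_server *demo_lookup(uint32_t src_ip)
{
        struct demo_server *n = servernode[demo_bucket(src_ip)];

        if (!n || n->dead || n->overloaded || n->weight <= 0)
                return NULL;
        return n;
}

int main(void)
{
        static struct demo_server web = { .weight = 1 };
        int i;

        for (i = 0; i < DEMO_TAB_SIZE; i++)     /* one real server everywhere */
                servernode[i] = &web;

        return demo_lookup(0xc0a80007) ? 0 : 1; /* 192.168.0.7, host order */
}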
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/ip_vs.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
/*
 * IPVS SH bucket
 */
struct ip_vs_sh_bucket {
        struct ip_vs_dest __rcu *dest;  /* real server (cache) */
};
/*
 * for IPVS SH entry hash table
 */
#ifndef CONFIG_IP_VS_SH_TAB_BITS
#define CONFIG_IP_VS_SH_TAB_BITS 8
#endif
#define IP_VS_SH_TAB_BITS CONFIG_IP_VS_SH_TAB_BITS
#define IP_VS_SH_TAB_SIZE (1 << IP_VS_SH_TAB_BITS)
#define IP_VS_SH_TAB_MASK (IP_VS_SH_TAB_SIZE - 1)

struct ip_vs_sh_state {
        struct rcu_head rcu_head;
        struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];
};
/* Helper function to determine if server is unavailable */
static inline bool is_unavailable(struct ip_vs_dest *dest)
{
        return atomic_read(&dest->weight) <= 0 ||
                dest->flags & IP_VS_DEST_F_OVERLOAD;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Alexander Frolkin | 29 | 100.00% | 1 | 100.00% |
| Total | 29 | 100.00% | 1 | 100.00% |
/*
 * Returns hash value for IPVS SH entry
 */
static inline unsigned int
ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr,
                 __be16 port, unsigned int offset)
{
        __be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
#endif
        return (offset + (ntohs(port) + ntohl(addr_fold))*2654435761UL) &
                IP_VS_SH_TAB_MASK;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Julius Volz | 56 | 60.22% | 1 | 25.00% |
| Wensong Zhang | 20 | 21.51% | 1 | 25.00% |
| Alexander Frolkin | 16 | 17.20% | 1 | 25.00% |
| Eric Dumazet | 1 | 1.08% | 1 | 25.00% |
| Total | 93 | 100.00% | 4 | 100.00% |
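The multiplier 2654435761 is the 32-bit golden-ratio constant used for multiplicative hashing, which spreads nearby addresses across the table; the offset argument simply shifts the resulting bucket and is what lets the fallback path below walk every bucket deterministically. A minimal user-space check of that behaviour (illustrative only; demo_hashkey is a made-up name):

/* Illustrative only: a user-space copy of the bucket computation. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define DEMO_TAB_MASK 0xff      /* 256 buckets, as with the default 8 bits */

static unsigned int demo_hashkey(uint32_t addr_be, uint16_t port_be,
                                 unsigned int offset)
{
        return (offset + (ntohs(port_be) + ntohl(addr_be)) * 2654435761UL) &
                DEMO_TAB_MASK;
}

int main(void)
{
        uint32_t addr = inet_addr("192.168.0.7");       /* network byte order */
        unsigned int off;

        /* Incrementing the offset moves the lookup to the next bucket,
         * which is how the fallback path can probe the whole table. */
        for (off = 0; off < 4; off++)
                printf("offset %u -> bucket %u\n", off,
                       demo_hashkey(addr, htons(80), off));
        return 0;
}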
/*
 * Get ip_vs_dest associated with supplied parameters.
 */
static inline struct ip_vs_dest *
ip_vs_sh_get(struct ip_vs_service *svc, struct ip_vs_sh_state *s,
             const union nf_inet_addr *addr, __be16 port)
{
        unsigned int hash = ip_vs_sh_hashkey(svc->af, addr, port, 0);
        struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest);

        return (!dest || is_unavailable(dest)) ? NULL : dest;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Alexander Frolkin | 51 | 66.23% | 1 | 25.00% |
| Wensong Zhang | 14 | 18.18% | 1 | 25.00% |
| Julian Anastasov | 7 | 9.09% | 1 | 25.00% |
| Julius Volz | 5 | 6.49% | 1 | 25.00% |
| Total | 77 | 100.00% | 4 | 100.00% |
/* As ip_vs_sh_get, but with fallback if selected server is unavailable
 *
 * The fallback strategy loops around the table starting from a "random"
 * point (in fact, it is chosen to be the original hash value to make the
 * algorithm deterministic) to find a new server.
 */
static inline struct ip_vs_dest *
ip_vs_sh_get_fallback(struct ip_vs_service *svc, struct ip_vs_sh_state *s,
                      const union nf_inet_addr *addr, __be16 port)
{
        unsigned int offset, roffset;
        unsigned int hash, ihash;
        struct ip_vs_dest *dest;

        /* first try the dest it's supposed to go to */
        ihash = ip_vs_sh_hashkey(svc->af, addr, port, 0);
        dest = rcu_dereference(s->buckets[ihash].dest);
        if (!dest)
                return NULL;
        if (!is_unavailable(dest))
                return dest;

        IP_VS_DBG_BUF(6, "SH: selected unavailable server %s:%d, reselecting",
                      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));

        /* if the original dest is unavailable, loop around the table
         * starting from ihash to find a new dest
         */
        for (offset = 0; offset < IP_VS_SH_TAB_SIZE; offset++) {
                roffset = (offset + ihash) % IP_VS_SH_TAB_SIZE;
                hash = ip_vs_sh_hashkey(svc->af, addr, port, roffset);
                dest = rcu_dereference(s->buckets[hash].dest);
                if (!dest)
                        break;
                if (!is_unavailable(dest))
                        return dest;
                IP_VS_DBG_BUF(6, "SH: selected unavailable "
                              "server %s:%d (offset %d), reselecting",
                              IP_VS_DBG_ADDR(dest->af, &dest->addr),
                              ntohs(dest->port), roffset);
        }

        return NULL;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Alexander Frolkin | 210 | 93.75% | 2 | 33.33% |
| Wensong Zhang | 9 | 4.02% | 1 | 16.67% |
| Julian Anastasov | 3 | 1.34% | 2 | 33.33% |
| Julius Volz | 2 | 0.89% | 1 | 16.67% |
| Total | 224 | 100.00% | 6 | 100.00% |
/*
 * Assign all the hash buckets of the specified table with the service.
 */
static int
ip_vs_sh_reassign(struct ip_vs_sh_state *s, struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_sh_bucket *b;
        struct list_head *p;
        struct ip_vs_dest *dest;
        int d_count;
        bool empty;

        b = &s->buckets[0];
        p = &svc->destinations;
        empty = list_empty(p);
        d_count = 0;
        for (i=0; i<IP_VS_SH_TAB_SIZE; i++) {
                dest = rcu_dereference_protected(b->dest, 1);
                if (dest)
                        ip_vs_dest_put(dest);
                if (empty)
                        RCU_INIT_POINTER(b->dest, NULL);
                else {
                        if (p == &svc->destinations)
                                p = p->next;

                        dest = list_entry(p, struct ip_vs_dest, n_list);
                        ip_vs_dest_hold(dest);
                        RCU_INIT_POINTER(b->dest, dest);

                        IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n",
                                      i, IP_VS_DBG_ADDR(dest->af, &dest->addr),
                                      atomic_read(&dest->weight));

                        /* Don't move to next dest until filling weight */
                        if (++d_count >= atomic_read(&dest->weight)) {
                                p = p->next;
                                d_count = 0;
                        }
                }
                b++;
        }
        return 0;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Wensong Zhang | 117 | 52.94% | 1 | 25.00% |
| Michael Maxim | 55 | 24.89% | 1 | 25.00% |
| Julian Anastasov | 49 | 22.17% | 2 | 50.00% |
| Total | 221 | 100.00% | 4 | 100.00% |
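The d_count logic above gives each destination a run of consecutive buckets equal to its weight before moving to the next one, so a destination's share of the 256 buckets is roughly proportional to its weight (a destination with non-positive weight still occupies a bucket, but is filtered out at lookup time by is_unavailable()). A user-space sketch of just that loop (illustrative only; the demo names are made up):

/* Illustrative only: the weighted bucket-filling rule from ip_vs_sh_reassign(). */
#include <stdio.h>

#define DEMO_TAB_SIZE 256

struct demo_dest {
        const char *name;
        int weight;
};

int main(void)
{
        struct demo_dest dests[] = {
                { "rs1", 1 }, { "rs2", 2 }, { "rs3", 1 },
        };
        int counts[3] = { 0, 0, 0 };
        int i, d = 0, d_count = 0;

        for (i = 0; i < DEMO_TAB_SIZE; i++) {
                counts[d]++;                    /* bucket i -> dests[d] */
                /* Don't move to the next dest until its weight is filled. */
                if (++d_count >= dests[d].weight) {
                        d = (d + 1) % 3;
                        d_count = 0;
                }
        }
        for (i = 0; i < 3; i++)
                printf("%s (weight %d): %d buckets\n",
                       dests[i].name, dests[i].weight, counts[i]);
        return 0;       /* prints 64, 128 and 64 buckets respectively */
}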
/*
 * Flush all the hash buckets of the specified table.
 */
static void ip_vs_sh_flush(struct ip_vs_sh_state *s)
{
        int i;
        struct ip_vs_sh_bucket *b;
        struct ip_vs_dest *dest;

        b = &s->buckets[0];
        for (i=0; i<IP_VS_SH_TAB_SIZE; i++) {
                dest = rcu_dereference_protected(b->dest, 1);
                if (dest) {
                        ip_vs_dest_put(dest);
                        RCU_INIT_POINTER(b->dest, NULL);
                }
                b++;
        }
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Wensong Zhang | 53 | 63.86% | 1 | 50.00% |
| Julian Anastasov | 30 | 36.14% | 1 | 50.00% |
| Total | 83 | 100.00% | 2 | 100.00% |
static int ip_vs_sh_init_svc(struct ip_vs_service *svc)
{
        struct ip_vs_sh_state *s;

        /* allocate the SH table for this service */
        s = kzalloc(sizeof(struct ip_vs_sh_state), GFP_KERNEL);
        if (s == NULL)
                return -ENOMEM;

        svc->sched_data = s;
        IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) allocated for "
                  "current service\n",
                  sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE);

        /* assign the hash buckets with current dests */
        ip_vs_sh_reassign(s, svc);

        return 0;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Wensong Zhang | 61 | 83.56% | 1 | 25.00% |
| Julian Anastasov | 11 | 15.07% | 2 | 50.00% |
| Randy Dunlap | 1 | 1.37% | 1 | 25.00% |
| Total | 73 | 100.00% | 4 | 100.00% |
static void ip_vs_sh_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_sh_state *s = svc->sched_data;

        /* got to clean up hash buckets here */
        ip_vs_sh_flush(s);

        /* release the table itself */
        kfree_rcu(s, rcu_head);
        IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) released\n",
                  sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Wensong Zhang | 40 | 81.63% | 1 | 25.00% |
| Julian Anastasov | 8 | 16.33% | 2 | 50.00% |
| Randy Dunlap | 1 | 2.04% | 1 | 25.00% |
| Total | 49 | 100.00% | 4 | 100.00% |
static int ip_vs_sh_dest_changed(struct ip_vs_service *svc,
                                 struct ip_vs_dest *dest)
{
        struct ip_vs_sh_state *s = svc->sched_data;

        /* assign the hash buckets with the updated service */
        ip_vs_sh_reassign(s, svc);

        return 0;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Wensong Zhang | 26 | 72.22% | 1 | 50.00% |
| Julian Anastasov | 10 | 27.78% | 1 | 50.00% |
| Total | 36 | 100.00% | 2 | 100.00% |
/* Helper function to get port number */
static inline __be16
ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
{
        __be16 _ports[2], *ports;

        /* At this point we know that we have a valid packet of some kind.
         * Because ICMP packets are only guaranteed to have the first 8
         * bytes, let's just grab the ports.  Fortunately they're in the
         * same position for all three of the protocols we care about.
         */
        switch (iph->protocol) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
        case IPPROTO_SCTP:
                ports = skb_header_pointer(skb, iph->len, sizeof(_ports),
                                           &_ports);
                if (unlikely(!ports))
                        return 0;

                if (likely(!ip_vs_iph_inverse(iph)))
                        return ports[0];
                else
                        return ports[1];
        default:
                return 0;
        }
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Alexander Frolkin | 44 | 42.72% | 1 | 25.00% |
| Alex Gartrell | 32 | 31.07% | 1 | 25.00% |
| Daniel Borkmann | 17 | 16.50% | 1 | 25.00% |
| Wensong Zhang | 10 | 9.71% | 1 | 25.00% |
| Total | 103 | 100.00% | 4 | 100.00% |
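The "same position" remark holds because TCP, UDP and SCTP all begin their headers with a 16-bit source port followed by a 16-bit destination port, so the single 4-byte skb_header_pointer() read at iph->len covers all three protocols; ports[0] is then the source port and ports[1] the destination port. A trivial layout reminder (user-space, for reference only; the struct name is made up):

/* For reference only: the first four bytes of TCP, UDP and SCTP headers. */
#include <stdint.h>

struct demo_transport_ports {
        uint16_t source;        /* big-endian source port (ports[0] above) */
        uint16_t dest;          /* big-endian destination port (ports[1]) */
        /* protocol-specific fields follow and are not needed for hashing */
};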
/*
 * Source Hashing scheduling
 */
static struct ip_vs_dest *
ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
                  struct ip_vs_iphdr *iph)
{
        struct ip_vs_dest *dest;
        struct ip_vs_sh_state *s;
        __be16 port = 0;
        const union nf_inet_addr *hash_addr;

        hash_addr = ip_vs_iph_inverse(iph) ? &iph->daddr : &iph->saddr;

        IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n");

        if (svc->flags & IP_VS_SVC_F_SCHED_SH_PORT)
                port = ip_vs_sh_get_port(skb, iph);

        s = (struct ip_vs_sh_state *) svc->sched_data;

        if (svc->flags & IP_VS_SVC_F_SCHED_SH_FALLBACK)
                dest = ip_vs_sh_get_fallback(svc, s, hash_addr, port);
        else
                dest = ip_vs_sh_get(svc, s, hash_addr, port);

        if (!dest) {
                ip_vs_scheduler_err(svc, "no destination available");
                return NULL;
        }

        IP_VS_DBG_BUF(6, "SH: source IP address %s --> server %s:%d\n",
                      IP_VS_DBG_ADDR(svc->af, hash_addr),
                      IP_VS_DBG_ADDR(dest->af, &dest->addr),
                      ntohs(dest->port));

        return dest;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Wensong Zhang | 76 | 40.64% | 1 | 9.09% |
| Alexander Frolkin | 46 | 24.60% | 1 | 9.09% |
| Alex Gartrell | 26 | 13.90% | 1 | 9.09% |
| Julius Volz | 17 | 9.09% | 1 | 9.09% |
| Julian Anastasov | 14 | 7.49% | 4 | 36.36% |
| Patrick Schaaf | 4 | 2.14% | 1 | 9.09% |
| Simon Horman | 3 | 1.60% | 1 | 9.09% |
| Harvey Harrison | 1 | 0.53% | 1 | 9.09% |
| Total | 187 | 100.00% | 11 | 100.00% |
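Usage note (not part of this file): IP_VS_SVC_F_SCHED_SH_PORT and IP_VS_SVC_F_SCHED_SH_FALLBACK are per-service scheduler flags set from user space; with a sufficiently recent ipvsadm they are typically enabled when the service is created, along the lines of "ipvsadm -A -t <VIP>:<port> -s sh -b sh-port,sh-fallback", where -b/--sched-flags passes scheduler flags. Treat the exact option spelling as a hint to verify against the installed ipvsadm man page rather than as a guarantee.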
/*
 * IPVS SH Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_sh_scheduler =
{
        .name =                 "sh",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .n_list =               LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
        .init_service =         ip_vs_sh_init_svc,
        .done_service =         ip_vs_sh_done_svc,
        .add_dest =             ip_vs_sh_dest_changed,
        .del_dest =             ip_vs_sh_dest_changed,
        .upd_dest =             ip_vs_sh_dest_changed,
        .schedule =             ip_vs_sh_schedule,
};
static int __init ip_vs_sh_init(void)
{
        return register_ip_vs_scheduler(&ip_vs_sh_scheduler);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Wensong Zhang | 16 | 100.00% | 1 | 100.00% |
| Total | 16 | 100.00% | 1 | 100.00% |
static void __exit ip_vs_sh_cleanup(void)
{
        unregister_ip_vs_scheduler(&ip_vs_sh_scheduler);
        synchronize_rcu();
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Wensong Zhang | 15 | 83.33% | 1 | 50.00% |
| Julian Anastasov | 3 | 16.67% | 1 | 50.00% |
| Total | 18 | 100.00% | 2 | 100.00% |
module_init(ip_vs_sh_init);
module_exit(ip_vs_sh_cleanup);
MODULE_LICENSE("GPL");
Overall Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| Wensong Zhang | 564 | 40.60% | 1 | 4.00% |
| Alexander Frolkin | 408 | 29.37% | 2 | 8.00% |
| Julian Anastasov | 160 | 11.52% | 7 | 28.00% |
| Julius Volz | 80 | 5.76% | 1 | 4.00% |
| Alex Gartrell | 58 | 4.18% | 1 | 4.00% |
| Michael Maxim | 56 | 4.03% | 1 | 4.00% |
| Daniel Borkmann | 17 | 1.22% | 1 | 4.00% |
| Hannes Eder | 11 | 0.79% | 1 | 4.00% |
| Sven Wegener | 10 | 0.72% | 1 | 4.00% |
| Arnaldo Carvalho de Melo | 6 | 0.43% | 1 | 4.00% |
| Patrick Schaaf | 4 | 0.29% | 1 | 4.00% |
| Jan Beulich | 4 | 0.29% | 1 | 4.00% |
| Simon Horman | 3 | 0.22% | 1 | 4.00% |
| Tejun Heo | 3 | 0.22% | 1 | 4.00% |
| Randy Dunlap | 2 | 0.14% | 1 | 4.00% |
| Adrian Bunk | 1 | 0.07% | 1 | 4.00% |
| Eric Dumazet | 1 | 0.07% | 1 | 4.00% |
| Harvey Harrison | 1 | 0.07% | 1 | 4.00% |
| Total | 1389 | 100.00% | 25 | 100.00% |