Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Wensong Zhang | 1375 | 58.41% | 1 | 1.92% |
Julian Anastasov | 369 | 15.68% | 6 | 11.54% |
Stephen Hemminger | 302 | 12.83% | 3 | 5.77% |
Hans Schillstrom | 111 | 4.72% | 4 | 7.69% |
Eric W. Biederman | 24 | 1.02% | 7 | 13.46%
Hannes Eder | 23 | 0.98% | 2 | 3.85% |
Arnaldo Carvalho de Melo | 23 | 0.98% | 3 | 5.77% |
Linus Torvalds (pre-git) | 20 | 0.85% | 9 | 17.31% |
Zhengchao Shao | 18 | 0.76% | 1 | 1.92% |
Davide Caratti | 17 | 0.72% | 1 | 1.92% |
Herbert Xu | 16 | 0.68% | 1 | 1.92% |
Simon Horman | 14 | 0.59% | 1 | 1.92% |
Ingo Molnar | 11 | 0.47% | 1 | 1.92% |
Gao Feng | 9 | 0.38% | 2 | 3.85% |
Venkata Mohan Reddy | 5 | 0.21% | 1 | 1.92% |
Arnd Bergmann | 4 | 0.17% | 1 | 1.92% |
Jean Tourrilhes | 4 | 0.17% | 1 | 1.92% |
Thomas Gleixner | 2 | 0.08% | 1 | 1.92% |
Florian Westphal | 2 | 0.08% | 1 | 1.92% |
Adrian Bunk | 1 | 0.04% | 1 | 1.92% |
Philippe De Muyter | 1 | 0.04% | 1 | 1.92% |
Eric Dumazet | 1 | 0.04% | 1 | 1.92% |
Christoph Hellwig | 1 | 0.04% | 1 | 1.92% |
Linus Torvalds | 1 | 0.04% | 1 | 1.92% |
Total | 2354 | 100.00% | 52 | 100.00%
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ip_vs_app.c: Application module support for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
 * is that ip_vs_app module handles the reverse direction (incoming requests
 * and outgoing responses).
 *
 *              IP_MASQ_APP application masquerading module
 *
 * Author:      Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <net/ip_vs.h>

EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);

static DEFINE_MUTEX(__ip_vs_app_mutex);

/*
 *      Get an ip_vs_app object
 */
static inline int ip_vs_app_get(struct ip_vs_app *app)
{
        return try_module_get(app->module);
}


static inline void ip_vs_app_put(struct ip_vs_app *app)
{
        module_put(app->module);
}

static void ip_vs_app_inc_destroy(struct ip_vs_app *inc)
{
        kfree(inc->timeout_table);
        kfree(inc);
}

static void ip_vs_app_inc_rcu_free(struct rcu_head *head)
{
        struct ip_vs_app *inc = container_of(head, struct ip_vs_app, rcu_head);

        ip_vs_app_inc_destroy(inc);
}

/*
 *      Allocate/initialize app incarnation and register it in proto apps.
 */
static int
ip_vs_app_inc_new(struct netns_ipvs *ipvs, struct ip_vs_app *app,
                  __u16 proto, __u16 port)
{
        struct ip_vs_protocol *pp;
        struct ip_vs_app *inc;
        int ret;

        if (!(pp = ip_vs_proto_get(proto)))
                return -EPROTONOSUPPORT;

        if (!pp->unregister_app)
                return -EOPNOTSUPP;

        inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
        if (!inc)
                return -ENOMEM;
        INIT_LIST_HEAD(&inc->p_list);
        INIT_LIST_HEAD(&inc->incs_list);
        inc->app = app;
        inc->port = htons(port);
        atomic_set(&inc->usecnt, 0);

        if (app->timeouts) {
                inc->timeout_table =
                        ip_vs_create_timeout_table(app->timeouts,
                                                   app->timeouts_size);
                if (!inc->timeout_table) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        ret = pp->register_app(ipvs, inc);
        if (ret)
                goto out;

        list_add(&inc->a_list, &app->incs_list);
        IP_VS_DBG(9, "%s App %s:%u registered\n",
                  pp->name, inc->name, ntohs(inc->port));

        return 0;

  out:
        ip_vs_app_inc_destroy(inc);
        return ret;
}


/*
 *      Release app incarnation
 */
static void
ip_vs_app_inc_release(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
        struct ip_vs_protocol *pp;

        if (!(pp = ip_vs_proto_get(inc->protocol)))
                return;

        if (pp->unregister_app)
                pp->unregister_app(ipvs, inc);

        IP_VS_DBG(9, "%s App %s:%u unregistered\n",
                  pp->name, inc->name, ntohs(inc->port));

        list_del(&inc->a_list);

        call_rcu(&inc->rcu_head, ip_vs_app_inc_rcu_free);
}


/*
 *      Get reference to app inc (only called from softirq)
 *
 */
int ip_vs_app_inc_get(struct ip_vs_app *inc)
{
        int result;

        result = ip_vs_app_get(inc->app);
        if (result)
                atomic_inc(&inc->usecnt);
        return result;
}


/*
 *      Put the app inc (only called from timer or net softirq)
 */
void ip_vs_app_inc_put(struct ip_vs_app *inc)
{
        atomic_dec(&inc->usecnt);
        ip_vs_app_put(inc->app);
}


/*
 *      Register an application incarnation in protocol applications
 */
int
register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app,
                       __u16 proto, __u16 port)
{
        int result;

        mutex_lock(&__ip_vs_app_mutex);

        result = ip_vs_app_inc_new(ipvs, app, proto, port);

        mutex_unlock(&__ip_vs_app_mutex);

        return result;
}


/* Register application for netns */
struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
{
        struct ip_vs_app *a;
        int err = 0;

        mutex_lock(&__ip_vs_app_mutex);

        /* increase the module use count */
        if (!ip_vs_use_count_inc()) {
                err = -ENOENT;
                goto out_unlock;
        }

        list_for_each_entry(a, &ipvs->app_list, a_list) {
                if (!strcmp(app->name, a->name)) {
                        err = -EEXIST;
                        /* decrease the module use count */
                        ip_vs_use_count_dec();
                        goto out_unlock;
                }
        }
        a = kmemdup(app, sizeof(*app), GFP_KERNEL);
        if (!a) {
                err = -ENOMEM;
                /* decrease the module use count */
                ip_vs_use_count_dec();
                goto out_unlock;
        }
        INIT_LIST_HEAD(&a->incs_list);
        list_add(&a->a_list, &ipvs->app_list);

out_unlock:
        mutex_unlock(&__ip_vs_app_mutex);

        return err ? ERR_PTR(err) : a;
}


/*
 *      ip_vs_app unregistration routine
 *      We are sure there are no app incarnations attached to services
 *      Caller should use synchronize_rcu() or rcu_barrier()
 */
void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
{
        struct ip_vs_app *a, *anxt, *inc, *nxt;

        mutex_lock(&__ip_vs_app_mutex);

        list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
                if (app && strcmp(app->name, a->name))
                        continue;
                list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
                        ip_vs_app_inc_release(ipvs, inc);
                }

                list_del(&a->a_list);
                kfree(a);

                /* decrease the module use count */
                ip_vs_use_count_dec();
        }

        mutex_unlock(&__ip_vs_app_mutex);
}


/*
 *      Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
 */
int ip_vs_bind_app(struct ip_vs_conn *cp,
                   struct ip_vs_protocol *pp)
{
        return pp->app_conn_bind(cp);
}


/*
 *      Unbind cp from application incarnation (called by cp destructor)
 */
void ip_vs_unbind_app(struct ip_vs_conn *cp)
{
        struct ip_vs_app *inc = cp->app;

        if (!inc)
                return;

        if (inc->unbind_conn)
                inc->unbind_conn(inc, cp);
        if (inc->done_conn)
                inc->done_conn(inc, cp);
        ip_vs_app_inc_put(inc);
        cp->app = NULL;
}


/*
 *      Fixes th->seq based on ip_vs_seq info.
 */
static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
        __u32 seq = ntohl(th->seq);

        /*
         *      Adjust seq with delta-offset for all packets after
         *      the most recent resized pkt seq and with previous_delta offset
         *      for all packets before most recent resized pkt seq.
         */
        if (vseq->delta || vseq->previous_delta) {
                if(after(seq, vseq->init_seq)) {
                        th->seq = htonl(seq + vseq->delta);
                        IP_VS_DBG(9, "%s(): added delta (%d) to seq\n",
                                  __func__, vseq->delta);
                } else {
                        th->seq = htonl(seq + vseq->previous_delta);
                        IP_VS_DBG(9, "%s(): added previous_delta (%d) to seq\n",
                                  __func__, vseq->previous_delta);
                }
        }
}


/*
 *      Fixes th->ack_seq based on ip_vs_seq info.
 */
static inline void
vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
        __u32 ack_seq = ntohl(th->ack_seq);

        /*
         * Adjust ack_seq with delta-offset for
         * the packets AFTER most recent resized pkt has caused a shift
         * for packets before most recent resized pkt, use previous_delta
         */
        if (vseq->delta || vseq->previous_delta) {
                /* since ack_seq is the number of octet that is expected
                   to receive next, so compare it with init_seq+delta */
                if(after(ack_seq, vseq->init_seq+vseq->delta)) {
                        th->ack_seq = htonl(ack_seq - vseq->delta);
                        IP_VS_DBG(9, "%s(): subtracted delta "
                                  "(%d) from ack_seq\n", __func__, vseq->delta);

                } else {
                        th->ack_seq = htonl(ack_seq - vseq->previous_delta);
                        IP_VS_DBG(9, "%s(): subtracted "
                                  "previous_delta (%d) from ack_seq\n",
                                  __func__, vseq->previous_delta);
                }
        }
}


/*
 *      Updates ip_vs_seq if pkt has been resized
 *      Assumes already checked proto==IPPROTO_TCP and diff!=0.
 */
static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
                                 unsigned int flag, __u32 seq, int diff)
{
        /* spinlock is to keep updating cp->flags atomic */
        spin_lock_bh(&cp->lock);
        if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
                vseq->previous_delta = vseq->delta;
                vseq->delta += diff;
                vseq->init_seq = seq;
                cp->flags |= flag;
        }
        spin_unlock_bh(&cp->lock);
}

static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
                                  struct ip_vs_app *app,
                                  struct ip_vs_iphdr *ipvsh)
{
        int diff;
        const unsigned int tcp_offset = ip_hdrlen(skb);
        struct tcphdr *th;
        __u32 seq;

        if (skb_ensure_writable(skb, tcp_offset + sizeof(*th)))
                return 0;

        th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

        /*
         *      Remember seq number in case this pkt gets resized
         */
        seq = ntohl(th->seq);

        /*
         *      Fix seq stuff if flagged as so.
         */
        if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
                vs_fix_seq(&cp->out_seq, th);
        if (cp->flags & IP_VS_CONN_F_IN_SEQ)
                vs_fix_ack_seq(&cp->in_seq, th);

        /*
         *      Call private output hook function
         */
        if (app->pkt_out == NULL)
                return 1;

        if (!app->pkt_out(app, cp, skb, &diff, ipvsh))
                return 0;

        /*
         *      Update ip_vs seq stuff if len has changed.
         */
        if (diff != 0)
                vs_seq_update(cp, &cp->out_seq,
                              IP_VS_CONN_F_OUT_SEQ, seq, diff);

        return 1;
}

/*
 *      Output pkt hook. Will call bound ip_vs_app specific function
 *      called by ipvs packet handler, assumes previously checked cp!=NULL
 *      returns false if it can't handle packet (oom)
 */
int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
                      struct ip_vs_iphdr *ipvsh)
{
        struct ip_vs_app *app;

        /*
         *      check if application module is bound to
         *      this ip_vs_conn.
         */
        if ((app = cp->app) == NULL)
                return 1;

        /* TCP is complicated */
        if (cp->protocol == IPPROTO_TCP)
                return app_tcp_pkt_out(cp, skb, app, ipvsh);

        /*
         *      Call private output hook function
         */
        if (app->pkt_out == NULL)
                return 1;

        return app->pkt_out(app, cp, skb, NULL, ipvsh);
}


static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
                                 struct ip_vs_app *app,
                                 struct ip_vs_iphdr *ipvsh)
{
        int diff;
        const unsigned int tcp_offset = ip_hdrlen(skb);
        struct tcphdr *th;
        __u32 seq;

        if (skb_ensure_writable(skb, tcp_offset + sizeof(*th)))
                return 0;

        th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

        /*
         *      Remember seq number in case this pkt gets resized
         */
        seq = ntohl(th->seq);

        /*
         *      Fix seq stuff if flagged as so.
         */
        if (cp->flags & IP_VS_CONN_F_IN_SEQ)
                vs_fix_seq(&cp->in_seq, th);
        if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
                vs_fix_ack_seq(&cp->out_seq, th);

        /*
         *      Call private input hook function
         */
        if (app->pkt_in == NULL)
                return 1;

        if (!app->pkt_in(app, cp, skb, &diff, ipvsh))
                return 0;

        /*
         *      Update ip_vs seq stuff if len has changed.
         */
        if (diff != 0)
                vs_seq_update(cp, &cp->in_seq,
                              IP_VS_CONN_F_IN_SEQ, seq, diff);

        return 1;
}

/*
 *      Input pkt hook. Will call bound ip_vs_app specific function
 *      called by ipvs packet handler, assumes previously checked cp!=NULL.
 *      returns false if can't handle packet (oom).
 */
int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
                     struct ip_vs_iphdr *ipvsh)
{
        struct ip_vs_app *app;

        /*
         *      check if application module is bound to
         *      this ip_vs_conn.
         */
        if ((app = cp->app) == NULL)
                return 1;

        /* TCP is complicated */
        if (cp->protocol == IPPROTO_TCP)
                return app_tcp_pkt_in(cp, skb, app, ipvsh);

        /*
         *      Call private input hook function
         */
        if (app->pkt_in == NULL)
                return 1;

        return app->pkt_in(app, cp, skb, NULL, ipvsh);
}


#ifdef CONFIG_PROC_FS
/*
 *      /proc/net/ip_vs_app entry function
 */

static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos)
{
        struct ip_vs_app *app, *inc;

        list_for_each_entry(app, &ipvs->app_list, a_list) {
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        if (pos-- == 0)
                                return inc;
                }
        }
        return NULL;

}

static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        struct netns_ipvs *ipvs = net_ipvs(net);

        mutex_lock(&__ip_vs_app_mutex);

        return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct ip_vs_app *inc, *app;
        struct list_head *e;
        struct net *net = seq_file_net(seq);
        struct netns_ipvs *ipvs = net_ipvs(net);

        ++*pos;
        if (v == SEQ_START_TOKEN)
                return ip_vs_app_idx(ipvs, 0);

        inc = v;
        app = inc->app;

        if ((e = inc->a_list.next) != &app->incs_list)
                return list_entry(e, struct ip_vs_app, a_list);

        /* go on to next application */
        for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
                app = list_entry(e, struct ip_vs_app, a_list);
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        return inc;
                }
        }
        return NULL;
}

static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
        mutex_unlock(&__ip_vs_app_mutex);
}

static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "prot port    usecnt name\n");
        else {
                const struct ip_vs_app *inc = v;

                seq_printf(seq, "%-3s  %-7u %-6d %-17s\n",
                           ip_vs_proto_name(inc->protocol),
                           ntohs(inc->port),
                           atomic_read(&inc->usecnt),
                           inc->name);
        }
        return 0;
}

static const struct seq_operations ip_vs_app_seq_ops = {
        .start = ip_vs_app_seq_start,
        .next  = ip_vs_app_seq_next,
        .stop  = ip_vs_app_seq_stop,
        .show  = ip_vs_app_seq_show,
};
#endif

int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
{
        INIT_LIST_HEAD(&ipvs->app_list);
#ifdef CONFIG_PROC_FS
        if (!proc_create_net("ip_vs_app", 0, ipvs->net->proc_net,
                             &ip_vs_app_seq_ops,
                             sizeof(struct seq_net_private)))
                return -ENOMEM;
#endif
        return 0;
}

void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
{
        unregister_ip_vs_app(ipvs, NULL /* all */);
#ifdef CONFIG_PROC_FS
        remove_proc_entry("ip_vs_app", ipvs->net->proc_net);
#endif
}
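For orientation, below is a minimal sketch of how an application helper module (in the spirit of ip_vs_ftp) would consume this API. It is not part of ip_vs_app.c: the module name "my_app", the handler my_app_pkt_in(), the port number 12345, and the per-netns init wrapper are illustrative assumptions; only register_ip_vs_app(), register_ip_vs_app_inc(), unregister_ip_vs_app() and the pkt_in/pkt_out hook signature come from the file above.

/*
 * Illustrative sketch only (not part of ip_vs_app.c): a helper module
 * registering an application and one TCP incarnation.  "my_app",
 * my_app_pkt_in() and port 12345 are made up for this example.
 */
#include <linux/module.h>
#include <net/ip_vs.h>

static int my_app_pkt_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
                         struct sk_buff *skb, int *diff,
                         struct ip_vs_iphdr *ipvsh)
{
        if (diff)
                *diff = 0;      /* payload length unchanged */
        return 1;               /* non-zero: let IPVS keep processing */
}

static struct ip_vs_app my_app = {
        .name     = "my_app",
        .protocol = IPPROTO_TCP,
        .module   = THIS_MODULE,
        .pkt_in   = my_app_pkt_in,
        .pkt_out  = NULL,       /* outgoing direction not mangled */
};

/* Typically called from a pernet_operations ->init hook. */
static int __net_init my_app_net_init(struct net *net)
{
        struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_app *app;
        int ret;

        /* Returns the kmemdup'ed per-netns copy, or ERR_PTR() on failure. */
        app = register_ip_vs_app(ipvs, &my_app);
        if (IS_ERR(app))
                return PTR_ERR(app);

        /* Attach an incarnation to TCP port 12345 (host byte order). */
        ret = register_ip_vs_app_inc(ipvs, app, IPPROTO_TCP, 12345);
        if (ret)
                unregister_ip_vs_app(ipvs, app);
        return ret;
}

Once an incarnation is registered through pp->register_app(), the protocol handler can match new connections to it by port via its app_conn_bind hook (invoked from ip_vs_bind_app() above), after which ip_vs_app_pkt_in()/ip_vs_app_pkt_out() dispatch to the module's pkt_in/pkt_out callbacks and adjust TCP sequence numbers when the payload is resized.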