Release 4.11 net/netfilter/ipvs/ip_vs_app.c
/*
 * ip_vs_app.c: Application module support for IPVS
 *
 * Authors:	Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
 * is that ip_vs_app module handles the reverse direction (incoming requests
 * and outgoing responses).
 *
 *		IP_MASQ_APP application masquerading module
 *
 * Author:	Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
 *
 */
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <net/ip_vs.h>
EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);
static DEFINE_MUTEX(__ip_vs_app_mutex);
/*
 *	Get an ip_vs_app object
 */
static inline int ip_vs_app_get(struct ip_vs_app *app)
{
	return try_module_get(app->module);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
static inline void ip_vs_app_put(struct ip_vs_app *app)
{
	module_put(app->module);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
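ip_vs_app_get() and ip_vs_app_put() merely pin and release the module that owns the application, so an incarnation cannot outlive its owning module while it is in use. A minimal usage sketch of the expected pairing (my_use_app() is purely illustrative, not part of this file):

/* Hypothetical caller: pin the owning module for the duration of a use. */
static void my_use_app(struct ip_vs_app *app)
{
	if (!ip_vs_app_get(app))	/* try_module_get() fails while unloading */
		return;
	/* ... it is now safe to call into app's hooks ... */
	ip_vs_app_put(app);		/* drop the module reference */
}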
static void ip_vs_app_inc_destroy(struct ip_vs_app *inc)
{
	kfree(inc->timeout_table);
	kfree(inc);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
static void ip_vs_app_inc_rcu_free(struct rcu_head *head)
{
	struct ip_vs_app *inc = container_of(head, struct ip_vs_app, rcu_head);

	ip_vs_app_inc_destroy(inc);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
/*
 *	Allocate/initialize app incarnation and register it in proto apps.
 */
static int
ip_vs_app_inc_new(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
		  __u16 port)
{
	struct ip_vs_protocol *pp;
	struct ip_vs_app *inc;
	int ret;

	if (!(pp = ip_vs_proto_get(proto)))
		return -EPROTONOSUPPORT;

	if (!pp->unregister_app)
		return -EOPNOTSUPP;

	inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
	if (!inc)
		return -ENOMEM;
	INIT_LIST_HEAD(&inc->p_list);
	INIT_LIST_HEAD(&inc->incs_list);
	inc->app = app;
	inc->port = htons(port);
	atomic_set(&inc->usecnt, 0);

	if (app->timeouts) {
		inc->timeout_table =
			ip_vs_create_timeout_table(app->timeouts,
						   app->timeouts_size);
		if (!inc->timeout_table) {
			ret = -ENOMEM;
			goto out;
		}
	}

	ret = pp->register_app(ipvs, inc);
	if (ret)
		goto out;

	list_add(&inc->a_list, &app->incs_list);
	IP_VS_DBG(9, "%s App %s:%u registered\n",
		  pp->name, inc->name, ntohs(inc->port));

	return 0;

out:
	ip_vs_app_inc_destroy(inc);
	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 215 | 92.27% | 1 | 12.50% |
Arnaldo Carvalho de Melo | 5 | 2.15% | 1 | 12.50% |
Julian Anastasov | 5 | 2.15% | 2 | 25.00% |
Hans Schillstrom | 4 | 1.72% | 1 | 12.50% |
Eric W. Biedermann | 3 | 1.29% | 2 | 25.00% |
Adrian Bunk | 1 | 0.43% | 1 | 12.50% |
Total | 233 | 100.00% | 8 | 100.00% |
/*
 *	Release app incarnation
 */
static void
ip_vs_app_inc_release(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
{
	struct ip_vs_protocol *pp;

	if (!(pp = ip_vs_proto_get(inc->protocol)))
		return;

	if (pp->unregister_app)
		pp->unregister_app(ipvs, inc);

	IP_VS_DBG(9, "%s App %s:%u unregistered\n",
		  pp->name, inc->name, ntohs(inc->port));

	list_del(&inc->a_list);

	call_rcu(&inc->rcu_head, ip_vs_app_inc_rcu_free);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 76 | 83.52% | 1 | 16.67% |
Julian Anastasov | 8 | 8.79% | 2 | 33.33% |
Hans Schillstrom | 4 | 4.40% | 1 | 16.67% |
Eric W. Biedermann | 3 | 3.30% | 2 | 33.33% |
Total | 91 | 100.00% | 6 | 100.00% |
/*
 *	Get reference to app inc (only called from softirq)
 *
 */
int ip_vs_app_inc_get(struct ip_vs_app *inc)
{
	int result;

	result = ip_vs_app_get(inc->app);
	if (result)
		atomic_inc(&inc->usecnt);
	return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 32 | 86.49% | 1 | 50.00% |
Julian Anastasov | 5 | 13.51% | 1 | 50.00% |
Total | 37 | 100.00% | 2 | 100.00% |
/*
 *	Put the app inc (only called from timer or net softirq)
 */
void ip_vs_app_inc_put(struct ip_vs_app *inc)
{
	atomic_dec(&inc->usecnt);
	ip_vs_app_put(inc->app);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 20 | 80.00% | 1 | 50.00% |
Julian Anastasov | 5 | 20.00% | 1 | 50.00% |
Total | 25 | 100.00% | 2 | 100.00% |
/*
 *	Register an application incarnation in protocol applications
 */
int
register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
		       __u16 port)
{
	int result;

	mutex_lock(&__ip_vs_app_mutex);

	result = ip_vs_app_inc_new(ipvs, app, proto, port);

	mutex_unlock(&__ip_vs_app_mutex);

	return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 41 | 78.85% | 1 | 16.67% |
Hans Schillstrom | 4 | 7.69% | 1 | 16.67% |
Eric W. Biedermann | 3 | 5.77% | 2 | 33.33% |
Simon Horman | 2 | 3.85% | 1 | 16.67% |
Ingo Molnar | 2 | 3.85% | 1 | 16.67% |
Total | 52 | 100.00% | 6 | 100.00% |
/* Register application for netns */
struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
{
	struct ip_vs_app *a;
	int err = 0;

	mutex_lock(&__ip_vs_app_mutex);

	list_for_each_entry(a, &ipvs->app_list, a_list) {
		if (!strcmp(app->name, a->name)) {
			err = -EEXIST;
			goto out_unlock;
		}
	}
	a = kmemdup(app, sizeof(*app), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out_unlock;
	}
	INIT_LIST_HEAD(&a->incs_list);
	list_add(&a->a_list, &ipvs->app_list);
	/* increase the module use count */
	ip_vs_use_count_inc();

out_unlock:
	mutex_unlock(&__ip_vs_app_mutex);

	return err ? ERR_PTR(err) : a;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 99 | 70.71% | 1 | 16.67% |
Wensong Zhang | 28 | 20.00% | 1 | 16.67% |
Hans Schillstrom | 7 | 5.00% | 1 | 16.67% |
Ingo Molnar | 2 | 1.43% | 1 | 16.67% |
Eric W. Biedermann | 2 | 1.43% | 1 | 16.67% |
Simon Horman | 2 | 1.43% | 1 | 16.67% |
Total | 140 | 100.00% | 6 | 100.00% |
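Taken together with register_ip_vs_app_inc() above, a helper module first registers one struct ip_vs_app per netns and then one incarnation per port it wants to inspect, much as the FTP helper does. A hedged sketch under the usual ip_vs headers; ip_vs_my_app, my_app_pernet_init() and port 12345 are illustrative assumptions, and the application hooks are elided:

/* Hypothetical helper application; pkt_in/pkt_out/init_conn hooks elided. */
static struct ip_vs_app ip_vs_my_app = {
	.name		= "myproto",
	.protocol	= IPPROTO_TCP,
	.module		= THIS_MODULE,
	.incs_list	= LIST_HEAD_INIT(ip_vs_my_app.incs_list),
};

static int __net_init my_app_pernet_init(struct netns_ipvs *ipvs)
{
	struct ip_vs_app *app;
	int ret;

	app = register_ip_vs_app(ipvs, &ip_vs_my_app);
	if (IS_ERR(app))
		return PTR_ERR(app);

	/* One incarnation per service port the helper should inspect. */
	ret = register_ip_vs_app_inc(ipvs, app, IPPROTO_TCP, 12345);
	if (ret)
		unregister_ip_vs_app(ipvs, &ip_vs_my_app);
	return ret;
}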
/*
 *	ip_vs_app unregistration routine
 *	We are sure there are no app incarnations attached to services
 *	Caller should use synchronize_rcu() or rcu_barrier()
 */
void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
{
	struct ip_vs_app *a, *anxt, *inc, *nxt;

	mutex_lock(&__ip_vs_app_mutex);

	list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
		if (app && strcmp(app->name, a->name))
			continue;
		list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
			ip_vs_app_inc_release(ipvs, inc);
		}

		list_del(&a->a_list);
		kfree(a);

		/* decrease the module use count */
		ip_vs_use_count_dec();
	}

	mutex_unlock(&__ip_vs_app_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 44 | 41.90% | 1 | 11.11% |
Wensong Zhang | 37 | 35.24% | 1 | 11.11% |
Stephen Hemminger | 15 | 14.29% | 2 | 22.22% |
Hans Schillstrom | 4 | 3.81% | 1 | 11.11% |
Eric W. Biedermann | 3 | 2.86% | 2 | 22.22% |
Simon Horman | 1 | 0.95% | 1 | 11.11% |
Ingo Molnar | 1 | 0.95% | 1 | 11.11% |
Total | 105 | 100.00% | 9 | 100.00% |
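As the comment above notes, incarnations are freed through call_rcu(), so a caller that tears down an application (and possibly unloads its module) should wait for the pending RCU callbacks. A hedged sketch of a matching cleanup path for the hypothetical ip_vs_my_app above (the pernet_operations plumbing is assumed, not shown):

static void __net_exit my_app_pernet_exit(struct netns_ipvs *ipvs)
{
	/* Releases every incarnation registered for this app in this netns. */
	unregister_ip_vs_app(ipvs, &ip_vs_my_app);
}

static void __exit my_app_module_exit(void)
{
	/* assumption: unregister_pernet_subsys() for the ops registered at init */
	rcu_barrier();	/* wait for ip_vs_app_inc_rcu_free() before unload */
}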
/*
 *	Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
 */
int ip_vs_bind_app(struct ip_vs_conn *cp,
		   struct ip_vs_protocol *pp)
{
	return pp->app_conn_bind(cp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
/*
 *	Unbind cp from application incarnation (called by cp destructor)
 */
void ip_vs_unbind_app(struct ip_vs_conn *cp)
{
	struct ip_vs_app *inc = cp->app;

	if (!inc)
		return;

	if (inc->unbind_conn)
		inc->unbind_conn(inc, cp);
	if (inc->done_conn)
		inc->done_conn(inc, cp);
	ip_vs_app_inc_put(inc);
	cp->app = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 66 | 100.00% | 1 | 100.00% |
Total | 66 | 100.00% | 1 | 100.00% |
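ip_vs_bind_app() simply delegates to the protocol's app_conn_bind hook, and ip_vs_unbind_app() undoes whatever that hook set up. A hedged sketch of what such a hook typically looks like, using only fields visible in this file; my_proto_find_inc() is a hypothetical per-protocol lookup that matches cp->vport against registered inc->port values:

/* Hypothetical lookup of a registered incarnation for this connection. */
static struct ip_vs_app *my_proto_find_inc(const struct ip_vs_conn *cp);

/* Hypothetical app_conn_bind implementation for some protocol. */
static int my_proto_app_conn_bind(struct ip_vs_conn *cp)
{
	struct ip_vs_app *inc;
	int result = 0;

	inc = my_proto_find_inc(cp);
	if (!inc)
		return 0;

	if (!ip_vs_app_inc_get(inc))	/* bumps usecnt and the module count */
		return 0;

	cp->app = inc;			/* ip_vs_unbind_app() drops this later */
	if (inc->init_conn)
		result = inc->init_conn(inc, cp);
	return result;
}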
/*
 *	Fixes th->seq based on ip_vs_seq info.
 */
static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
	__u32 seq = ntohl(th->seq);

	/*
	 *	Adjust seq with delta-offset for all packets after
	 *	the most recent resized pkt seq and with previous_delta offset
	 *	for all packets before most recent resized pkt seq.
	 */
	if (vseq->delta || vseq->previous_delta) {
		if (after(seq, vseq->init_seq)) {
			th->seq = htonl(seq + vseq->delta);
			IP_VS_DBG(9, "%s(): added delta (%d) to seq\n",
				  __func__, vseq->delta);
		} else {
			th->seq = htonl(seq + vseq->previous_delta);
			IP_VS_DBG(9, "%s(): added previous_delta (%d) to seq\n",
				  __func__, vseq->previous_delta);
		}
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 103 | 94.50% | 1 | 50.00% |
Hannes Eder | 6 | 5.50% | 1 | 50.00% |
Total | 109 | 100.00% | 2 | 100.00% |
/*
 *	Fixes th->ack_seq based on ip_vs_seq info.
 */
static inline void
vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
	__u32 ack_seq = ntohl(th->ack_seq);

	/*
	 * Adjust ack_seq with delta-offset for
	 * the packets AFTER most recent resized pkt has caused a shift
	 * for packets before most recent resized pkt, use previous_delta
	 */
	if (vseq->delta || vseq->previous_delta) {
		/* ack_seq is the sequence number of the next octet the
		   peer expects to receive, so compare it with
		   init_seq + delta */
		if (after(ack_seq, vseq->init_seq + vseq->delta)) {
			th->ack_seq = htonl(ack_seq - vseq->delta);
			IP_VS_DBG(9, "%s(): subtracted delta "
				  "(%d) from ack_seq\n", __func__, vseq->delta);
		} else {
			th->ack_seq = htonl(ack_seq - vseq->previous_delta);
			IP_VS_DBG(9, "%s(): subtracted "
				  "previous_delta (%d) from ack_seq\n",
				  __func__, vseq->previous_delta);
		}
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 110 | 94.83% | 1 | 50.00% |
Hannes Eder | 6 | 5.17% | 1 | 50.00% |
Total | 116 | 100.00% | 2 | 100.00% |
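The two helpers above implement a single idea: sequence numbers sent after the most recently resized packet are shifted by delta, older ones by previous_delta, and ACKs coming back are shifted in the opposite direction. A worked example as a minimal userspace sketch (assumptions: plain '>' instead of the wrap-safe after() macro, and no network byte order):

/* Scenario: a helper enlarged one payload by 13 bytes; the resized segment
 * originally carried seq 1000, so init_seq = 1000, delta = 13,
 * previous_delta = 0. */
#include <stdio.h>

int main(void)
{
	unsigned int init_seq = 1000, delta = 13, previous_delta = 0;

	unsigned int seq = 1200;	/* segment sent after the resized one */
	unsigned int old_seq = 900;	/* retransmission from before it      */
	unsigned int ack_seq = 1233;	/* peer ACKs the shifted byte stream  */

	printf("seq %u -> %u\n", seq,		/* 1200 -> 1213 */
	       seq > init_seq ? seq + delta : seq + previous_delta);
	printf("seq %u -> %u\n", old_seq,	/* 900 stays 900 */
	       old_seq > init_seq ? old_seq + delta : old_seq + previous_delta);
	printf("ack %u -> %u\n", ack_seq,	/* 1233 -> 1220 */
	       ack_seq > init_seq + delta ? ack_seq - delta
					  : ack_seq - previous_delta);
	return 0;
}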
/*
 *	Updates ip_vs_seq if pkt has been resized
 *	Assumes already checked proto==IPPROTO_TCP and diff!=0.
 */
static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
				 unsigned int flag, __u32 seq, int diff)
{
	/* spinlock is to keep updating cp->flags atomic */
	spin_lock_bh(&cp->lock);
	if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
		vseq->previous_delta = vseq->delta;
		vseq->delta += diff;
		vseq->init_seq = seq;
		cp->flags |= flag;
	}
	spin_unlock_bh(&cp->lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 89 | 96.74% | 1 | 33.33% |
Julian Anastasov | 2 | 2.17% | 1 | 33.33% |
Eric Dumazet | 1 | 1.09% | 1 | 33.33% |
Total | 92 | 100.00% | 3 | 100.00% |
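When a second resize happens, vs_seq_update() rolls the old delta into previous_delta so that segments between the two resize points still get the older shift. A small userspace continuation of the example above (assumption: the cp->flags test is modelled by a 'first' parameter and the connection lock is omitted):

#include <stdio.h>

struct seq_state { unsigned int init_seq; int delta, previous_delta; };

/* Mirrors the body of vs_seq_update() without the connection lock. */
static void seq_update(struct seq_state *s, unsigned int seq, int diff, int first)
{
	if (first || seq > s->init_seq) {
		s->previous_delta = s->delta;
		s->delta += diff;
		s->init_seq = seq;
	}
}

int main(void)
{
	struct seq_state s = { 0, 0, 0 };

	seq_update(&s, 1000, 13, 1);	/* first resize: +13 at seq 1000 */
	seq_update(&s, 2000, 5, 0);	/* second resize: +5 at seq 2000 */
	/* prints: init_seq=2000 delta=18 previous_delta=13 */
	printf("init_seq=%u delta=%d previous_delta=%d\n",
	       s.init_seq, s.delta, s.previous_delta);
	return 0;
}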
static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
				  struct ip_vs_app *app)
{
	int diff;
	const unsigned int tcp_offset = ip_hdrlen(skb);
	struct tcphdr *th;
	__u32 seq;

	if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

	/*
	 *	Remember seq number in case this pkt gets resized
	 */
	seq = ntohl(th->seq);

	/*
	 *	Fix seq stuff if flagged as so.
	 */
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_seq(&cp->out_seq, th);
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_ack_seq(&cp->in_seq, th);

	/*
	 *	Call private output hook function
	 */
	if (app->pkt_out == NULL)
		return 1;

	if (!app->pkt_out(app, cp, skb, &diff))
		return 0;

	/*
	 *	Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->out_seq,
			      IP_VS_CONN_F_OUT_SEQ, seq, diff);

	return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 135 | 73.77% | 1 | 16.67% |
Julian Anastasov | 35 | 19.13% | 1 | 16.67% |
Arnaldo Carvalho de Melo | 7 | 3.83% | 2 | 33.33% |
Herbert Xu | 6 | 3.28% | 2 | 33.33% |
Total | 183 | 100.00% | 6 | 100.00% |
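app_tcp_pkt_out() hands the now-writable skb to the application's pkt_out hook and expects it to report, through *diff, how much the payload length changed; that value is what vs_seq_update() records. A hedged skeleton of such a hook (the actual payload rewrite is elided and the names are hypothetical); note that the callers in this file pass diff == NULL for non-TCP traffic:

/* Hypothetical pkt_out hook: inspect/rewrite the payload, report the change. */
static int my_app_pkt_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
			  struct sk_buff *skb, int *diff)
{
	int grown = 0;	/* bytes added by a (hypothetical) payload rewrite */

	/* ... locate and rewrite the application payload in skb here ... */

	if (diff)
		*diff = grown;	/* app_tcp_pkt_out() feeds this to vs_seq_update() */
	return 1;		/* non-zero: packet handled, continue processing */
}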
/*
 *	Output pkt hook. Calls the bound ip_vs_app's specific function.
 *	Called by the ipvs packet handler; assumes cp != NULL was already
 *	checked. Returns false if it can't handle the packet (oom).
 */
int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb)
{
	struct ip_vs_app *app;

	/*
	 *	check if application module is bound to
	 *	this ip_vs_conn.
	 */
	if ((app = cp->app) == NULL)
		return 1;

	/* TCP is complicated */
	if (cp->protocol == IPPROTO_TCP)
		return app_tcp_pkt_out(cp, skb, app);

	/*
	 *	Call private output hook function
	 */
	if (app->pkt_out == NULL)
		return 1;

	return app->pkt_out(app, cp, skb, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 46 | 56.79% | 1 | 33.33% |
Wensong Zhang | 32 | 39.51% | 1 | 33.33% |
Herbert Xu | 3 | 3.70% | 1 | 33.33% |
Total | 81 | 100.00% | 3 | 100.00% |
static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
				 struct ip_vs_app *app)
{
	int diff;
	const unsigned int tcp_offset = ip_hdrlen(skb);
	struct tcphdr *th;
	__u32 seq;

	if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

	/*
	 *	Remember seq number in case this pkt gets resized
	 */
	seq = ntohl(th->seq);

	/*
	 *	Fix seq stuff if flagged as so.
	 */
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_seq(&cp->in_seq, th);
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_ack_seq(&cp->out_seq, th);

	/*
	 *	Call private input hook function
	 */
	if (app->pkt_in == NULL)
		return 1;

	if (!app->pkt_in(app, cp, skb, &diff))
		return 0;

	/*
	 *	Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->in_seq,
			      IP_VS_CONN_F_IN_SEQ, seq, diff);

	return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 101 | 55.19% | 1 | 16.67% |
Julian Anastasov | 69 | 37.70% | 1 | 16.67% |
Arnaldo Carvalho de Melo | 7 | 3.83% | 2 | 33.33% |
Herbert Xu | 6 | 3.28% | 2 | 33.33% |
Total | 183 | 100.00% | 6 | 100.00% |
/*
 *	Input pkt hook. Calls the bound ip_vs_app's specific function.
 *	Called by the ipvs packet handler; assumes cp != NULL was already
 *	checked. Returns false if it can't handle the packet (oom).
 */
int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
{
	struct ip_vs_app *app;

	/*
	 *	check if application module is bound to
	 *	this ip_vs_conn.
	 */
	if ((app = cp->app) == NULL)
		return 1;

	/* TCP is complicated */
	if (cp->protocol == IPPROTO_TCP)
		return app_tcp_pkt_in(cp, skb, app);

	/*
	 *	Call private input hook function
	 */
	if (app->pkt_in == NULL)
		return 1;

	return app->pkt_in(app, cp, skb, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 76 | 93.83% | 1 | 33.33% |
Herbert Xu | 3 | 3.70% | 1 | 33.33% |
Wensong Zhang | 2 | 2.47% | 1 | 33.33% |
Total | 81 | 100.00% | 3 | 100.00% |
#ifdef CONFIG_PROC_FS
/*
 *	/proc/net/ip_vs_app entry function
 */

static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos)
{
	struct ip_vs_app *app, *inc;

	list_for_each_entry(app, &ipvs->app_list, a_list) {
		list_for_each_entry(inc, &app->incs_list, a_list) {
			if (pos-- == 0)
				return inc;
		}
	}
	return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 37 | 64.91% | 2 | 50.00% |
Wensong Zhang | 14 | 24.56% | 1 | 25.00% |
Hans Schillstrom | 6 | 10.53% | 1 | 25.00% |
Total | 57 | 100.00% | 4 | 100.00% |
static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct netns_ipvs *ipvs = net_ipvs(net);

	mutex_lock(&__ip_vs_app_mutex);

	return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 29 | 50.00% | 1 | 20.00% |
Hans Schillstrom | 22 | 37.93% | 1 | 20.00% |
Wensong Zhang | 5 | 8.62% | 1 | 20.00% |
Ingo Molnar | 1 | 1.72% | 1 | 20.00% |
Simon Horman | 1 | 1.72% | 1 | 20.00% |
Total | 58 | 100.00% | 5 | 100.00% |
static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_vs_app *inc, *app;
	struct list_head *e;
	struct net *net = seq_file_net(seq);
	struct netns_ipvs *ipvs = net_ipvs(net);

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip_vs_app_idx(ipvs, 0);

	inc = v;
	app = inc->app;

	if ((e = inc->a_list.next) != &app->incs_list)
		return list_entry(e, struct ip_vs_app, a_list);

	/* go on to next application */
	for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
		app = list_entry(e, struct ip_vs_app, a_list);
		list_for_each_entry(inc, &app->incs_list, a_list) {
			return inc;
		}
	}
	return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 118 | 72.39% | 2 | 50.00% |
Hans Schillstrom | 25 | 15.34% | 1 | 25.00% |
Wensong Zhang | 20 | 12.27% | 1 | 25.00% |
Total | 163 | 100.00% | 4 | 100.00% |
static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
	mutex_unlock(&__ip_vs_app_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 18 | 85.71% | 1 | 25.00% |
Wensong Zhang | 1 | 4.76% | 1 | 25.00% |
Simon Horman | 1 | 4.76% | 1 | 25.00% |
Ingo Molnar | 1 | 4.76% | 1 | 25.00% |
Total | 21 | 100.00% | 4 | 100.00% |
static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "prot port usecnt name\n");
	else {
		const struct ip_vs_app *inc = v;

		seq_printf(seq, "%-3s %-7u %-6d %-17s\n",
			   ip_vs_proto_name(inc->protocol),
			   ntohs(inc->port),
			   atomic_read(&inc->usecnt),
			   inc->name);
	}
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 40 | 53.33% | 1 | 50.00% |
Wensong Zhang | 35 | 46.67% | 1 | 50.00% |
Total | 75 | 100.00% | 2 | 100.00% |
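For reference, with a single FTP-style incarnation registered on port 21, the /proc/net/ip_vs_app listing produced by the show function above would look roughly like this (values are illustrative assumptions):

/*
 * Illustrative /proc/net/ip_vs_app output:
 *
 *	prot port usecnt name
 *	TCP 21      0      ftp
 */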
static const struct seq_operations ip_vs_app_seq_ops = {
	.start = ip_vs_app_seq_start,
	.next  = ip_vs_app_seq_next,
	.stop  = ip_vs_app_seq_stop,
	.show  = ip_vs_app_seq_show,
};

static int ip_vs_app_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip_vs_app_seq_ops,
			    sizeof(struct seq_net_private));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 21 | 63.64% | 1 | 33.33% |
Hans Schillstrom | 9 | 27.27% | 1 | 33.33% |
Wensong Zhang | 3 | 9.09% | 1 | 33.33% |
Total | 33 | 100.00% | 3 | 100.00% |
static const struct file_operations ip_vs_app_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip_vs_app_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif
int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
{
	INIT_LIST_HEAD(&ipvs->app_list);
	proc_create("ip_vs_app", 0, ipvs->net->proc_net, &ip_vs_app_fops);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 14 | 36.84% | 1 | 12.50% |
Hans Schillstrom | 13 | 34.21% | 3 | 37.50% |
Gao Feng | 5 | 13.16% | 1 | 12.50% |
Eric W. Biedermann | 2 | 5.26% | 1 | 12.50% |
Stephen Hemminger | 2 | 5.26% | 1 | 12.50% |
Arnd Bergmann | 2 | 5.26% | 1 | 12.50% |
Total | 38 | 100.00% | 8 | 100.00% |
void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
{
	unregister_ip_vs_app(ipvs, NULL /* all */);
	remove_proc_entry("ip_vs_app", ipvs->net->proc_net);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hans Schillstrom | 13 | 43.33% | 2 | 28.57% |
Eric W. Biedermann | 5 | 16.67% | 2 | 28.57% |
Gao Feng | 5 | 16.67% | 1 | 14.29% |
Julian Anastasov | 5 | 16.67% | 1 | 14.29% |
Arnd Bergmann | 2 | 6.67% | 1 | 14.29% |
Total | 30 | 100.00% | 7 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 1300 | 55.44% | 1 | 2.56% |
Julian Anastasov | 457 | 19.49% | 5 | 12.82% |
Stephen Hemminger | 341 | 14.54% | 3 | 7.69% |
Hans Schillstrom | 112 | 4.78% | 4 | 10.26% |
Eric W. Biedermann | 24 | 1.02% | 7 | 17.95% |
Hannes Eder | 23 | 0.98% | 2 | 5.13% |
Arnaldo Carvalho de Melo | 22 | 0.94% | 4 | 10.26% |
Herbert Xu | 21 | 0.90% | 2 | 5.13% |
Simon Horman | 13 | 0.55% | 1 | 2.56% |
Gao Feng | 10 | 0.43% | 2 | 5.13% |
Ingo Molnar | 10 | 0.43% | 1 | 2.56% |
Arnd Bergmann | 4 | 0.17% | 1 | 2.56% |
Tejun Heo | 3 | 0.13% | 1 | 2.56% |
Adrian Bunk | 2 | 0.09% | 2 | 5.13% |
Philippe De Muyter | 1 | 0.04% | 1 | 2.56% |
Arjan van de Ven | 1 | 0.04% | 1 | 2.56% |
Eric Dumazet | 1 | 0.04% | 1 | 2.56% |
Total | 2345 | 100.00% | 39 | 100.00% |