Release 4.12: include/net/inetpeer.h
/*
 * INETPEER - A storage for permanent information about peers
 *
 * Authors: Andrey V. Savochkin <saw@msu.ru>
 */
#ifndef _NET_INETPEER_H
#define _NET_INETPEER_H
#include <linux/types.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <linux/atomic.h>
/* IPv4 address key for cache lookups */
struct ipv4_addr_key {
        __be32 addr;
        int    vif;
};
#define INETPEER_MAXKEYSZ (sizeof(struct in6_addr) / sizeof(u32))
struct inetpeer_addr {
        union {
                struct ipv4_addr_key a4;
                struct in6_addr      a6;
                u32                  key[INETPEER_MAXKEYSZ];
        };
        __u16 family;
};
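The union lets one key type serve both families: an IPv4 key (address plus VIF) occupies the first two 32-bit words, an IPv6 address fills all four, and key[] exposes a family-independent word view that inetpeer_addr_cmp() further down walks. The compile-time checks below are an illustration only, not part of the header; inetpeer_addr_layout_checks() is a hypothetical name and the sketch assumes the usual BUILD_BUG_ON() helper.

#include <linux/bug.h>
#include <net/inetpeer.h>

/* Illustration only: verify that the union members overlay the key words
 * as described above.
 */
static void __maybe_unused inetpeer_addr_layout_checks(void)
{
        /* the IPv4 key (address + VIF) fits in the first two u32 words */
        BUILD_BUG_ON(sizeof(struct ipv4_addr_key) > 2 * sizeof(u32));
        /* an IPv6 address fills the whole four-word key */
        BUILD_BUG_ON(sizeof(struct in6_addr) != INETPEER_MAXKEYSZ * sizeof(u32));
}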
struct inet_peer {
        /* group together avl_left,avl_right,v4daddr to speedup lookups */
        struct inet_peer __rcu *avl_left, *avl_right;
        struct inetpeer_addr    daddr;
        __u32                   avl_height;

        u32                     metrics[RTAX_MAX];
        u32                     rate_tokens;    /* rate limiting for ICMP */
        unsigned long           rate_last;
        union {
                struct list_head gc_list;
                struct rcu_head  gc_rcu;
        };
        /*
         * Once inet_peer is queued for deletion (refcnt == -1), following field
         * is not available: rid
         * We can share memory with rcu_head to help keep inet_peer small.
         */
        union {
                struct {
                        atomic_t rid;           /* Frag reception counter */
                };
                struct rcu_head   rcu;
                struct inet_peer *gc_next;
        };

        /* following fields might be frequently dirtied */
        __u32    dtime;   /* the time of last use of not referenced entries */
        atomic_t refcnt;
};
struct inet_peer_base {
        struct inet_peer __rcu *root;
        seqlock_t               lock;
        int                     total;
};
void inet_peer_base_init(struct inet_peer_base *);
void inet_initpeers(void) __init;
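inet_initpeers() runs once at boot; each user of the cache then owns a separate inet_peer_base (in the kernel these live per network namespace, e.g. net->ipv4.peers) and sets it up with inet_peer_base_init(). A minimal sketch of that pattern, using a hypothetical my_peers base rather than the real per-namespace fields:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inetpeer.h>

/* Hypothetical base, illustration only. */
static struct inet_peer_base *my_peers;

static int __init my_peers_init(void)
{
        my_peers = kzalloc(sizeof(*my_peers), GFP_KERNEL);
        if (!my_peers)
                return -ENOMEM;
        inet_peer_base_init(my_peers);          /* empty tree, lock initialized */
        return 0;
}

static void my_peers_exit(void)
{
        inetpeer_invalidate_tree(my_peers);     /* drop every cached peer */
        kfree(my_peers);
}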
#define INETPEER_METRICS_NEW (~(u32) 0)
static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
{
        iaddr->a4.addr = ip;
        iaddr->a4.vif = 0;
        iaddr->family = AF_INET;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
David Ahern | 29 | 78.38% | 2 | 66.67% |
Eric Dumazet | 8 | 21.62% | 1 | 33.33% |
Total | 37 | 100.00% | 3 | 100.00% |
static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
{
        return iaddr->a4.addr;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
David Ahern | 19 | 100.00% | 2 | 100.00% |
Total | 19 | 100.00% | 2 | 100.00% |
static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
                                        struct in6_addr *in6)
{
        iaddr->a6 = *in6;
        iaddr->family = AF_INET6;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
David Ahern | 30 | 100.00% | 2 | 100.00% |
Total | 30 | 100.00% | 2 | 100.00% |
static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
{
        return &iaddr->a6;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
David Ahern | 20 | 100.00% | 2 | 100.00% |
Total | 20 | 100.00% | 2 | 100.00% |
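Taken together, the four accessors above let a caller fill and read an inetpeer_addr without touching the union directly. A short sketch, illustration only; example_fill_keys() is a hypothetical helper:

#include <linux/bug.h>
#include <net/inetpeer.h>

/* Hypothetical helper: round-trip keys through the accessors. */
static void example_fill_keys(struct inetpeer_addr *k4, struct inetpeer_addr *k6,
                              __be32 ip, struct in6_addr *ip6)
{
        inetpeer_set_addr_v4(k4, ip);           /* family becomes AF_INET, vif 0 */
        inetpeer_set_addr_v6(k6, ip6);          /* family becomes AF_INET6 */

        WARN_ON(inetpeer_get_addr_v4(k4) != ip);
        WARN_ON(!ipv6_addr_equal(inetpeer_get_addr_v6(k6), ip6));
}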
/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
                               const struct inetpeer_addr *daddr,
                               int create);
static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
                                                __be32 v4daddr,
                                                int vif, int create)
{
        struct inetpeer_addr daddr;

        daddr.a4.addr = v4daddr;
        daddr.a4.vif = vif;
        daddr.family = AF_INET;
        return inet_getpeer(base, &daddr, create);
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
David S. Miller | 42 | 70.00% | 3 | 50.00% |
David Ahern | 13 | 21.67% | 1 | 16.67% |
Gao Feng | 5 | 8.33% | 2 | 33.33% |
Total | 60 | 100.00% | 6 | 100.00% |
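The usual call pattern is get, use, put: inet_getpeer() returns a referenced entry that the caller must drop with inet_putpeer(), and the result can be NULL (always possible with create == 0, and on allocation failure even with create == 1). A sketch only; example_touch_peer_v4() is hypothetical and "base" is whatever base the caller owns:

#include <net/inetpeer.h>

/* Hypothetical helper: look up (or create) a peer, use it, release it. */
static void example_touch_peer_v4(struct inet_peer_base *base, __be32 daddr)
{
        struct inet_peer *peer;

        peer = inet_getpeer_v4(base, daddr, 0, 1);      /* vif 0, create if missing */
        if (!peer)
                return;                                 /* allocation failed */

        /* ... read or update per-destination state, e.g. peer->rate_tokens ... */

        inet_putpeer(peer);     /* drop the reference inet_getpeer() took */
}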
static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
                                                const struct in6_addr *v6daddr,
                                                int create)
{
        struct inetpeer_addr daddr;

        daddr.a6 = *v6daddr;
        daddr.family = AF_INET6;
        return inet_getpeer(base, &daddr, create);
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
David S. Miller | 42 | 82.35% | 3 | 37.50% |
Gao Feng | 5 | 9.80% | 2 | 25.00% |
Alexey Dobriyan | 2 | 3.92% | 1 | 12.50% |
Eric Dumazet | 1 | 1.96% | 1 | 12.50% |
David Ahern | 1 | 1.96% | 1 | 12.50% |
Total | 51 | 100.00% | 8 | 100.00% |
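The IPv6 wrapper is used the same way. The sketch below is loosely modeled on per-destination ICMP rate limiting, where the peer's rate_tokens/rate_last state is consulted through inet_peer_xrlim_allow() (declared near the end of this header); example_allow_msg_v6() and its policy of allowing the message when no peer exists are illustrative, not the kernel's actual ICMPv6 code:

#include <net/inetpeer.h>

/* Hypothetical helper; timeout is in jiffies. */
static bool example_allow_msg_v6(struct inet_peer_base *base,
                                 const struct in6_addr *daddr, int timeout)
{
        struct inet_peer *peer;
        bool ok = true;                         /* no peer: do not rate-limit */

        peer = inet_getpeer_v6(base, daddr, 1);
        if (peer) {
                ok = inet_peer_xrlim_allow(peer, timeout);
                inet_putpeer(peer);
        }
        return ok;
}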
static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
                                    const struct inetpeer_addr *b)
{
        int i, n;

        if (a->family == AF_INET)
                n = sizeof(a->a4) / sizeof(u32);
        else
                n = sizeof(a->a6) / sizeof(u32);

        for (i = 0; i < n; i++) {
                if (a->key[i] == b->key[i])
                        continue;
                if (a->key[i] < b->key[i])
                        return -1;
                return 1;
        }

        return 0;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
David Ahern | 119 | 100.00% | 2 | 100.00% |
Total | 119 | 100.00% | 2 | 100.00% |
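Because the comparison walks key[] word by word, an IPv4 entry is ordered by address and then VIF (two words), while an IPv6 entry compares all four words; that gives the lookup tree a total order and keeps per-VRF peers for the same address distinct. A small illustrative check, where example_cmp() is hypothetical:

#include <linux/bug.h>
#include <linux/in.h>
#include <net/inetpeer.h>

/* Hypothetical helper: same IPv4 address, different VIF, distinct keys. */
static void example_cmp(void)
{
        struct inetpeer_addr a, b;

        inetpeer_set_addr_v4(&a, htonl(INADDR_LOOPBACK));
        inetpeer_set_addr_v4(&b, htonl(INADDR_LOOPBACK));
        b.a4.vif = 2;                           /* same address, different VRF device */

        WARN_ON(inetpeer_addr_cmp(&a, &a) != 0);        /* identical keys are equal */
        WARN_ON(inetpeer_addr_cmp(&a, &b) == 0);        /* the vif is part of the key */
}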
/* can be called from BH context or outside */
void inet_putpeer(struct inet_peer *p);
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
void inetpeer_invalidate_tree(struct inet_peer_base *);
#endif /* _NET_INETPEER_H */
Overall Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
David Ahern | 256 | 44.60% | 4 | 11.43% |
David S. Miller | 171 | 29.79% | 10 | 28.57% |
Linus Torvalds (pre-git) | 63 | 10.98% | 1 | 2.86% |
Eric Dumazet | 55 | 9.58% | 13 | 37.14% |
Gao Feng | 15 | 2.61% | 2 | 5.71% |
Steffen Klassert | 9 | 1.57% | 1 | 2.86% |
Alexey Dobriyan | 2 | 0.35% | 1 | 2.86% |
Adrian Bunk | 1 | 0.17% | 1 | 2.86% |
Arun Sharma | 1 | 0.17% | 1 | 2.86% |
Rusty Russell | 1 | 0.17% | 1 | 2.86% |
Total | 574 | 100.00% | 35 | 100.00% |