/* Release 4.11 — net/xfrm/xfrm_policy.c */
/*
* xfrm_policy.c
*
* Changes:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro <kunihiro@ipinfusion.com>
* IPv6 support
* Kazunori MIYAZAWA @USAGI
* YOSHIFUJI Hideaki
* Split up af-specific portion
* Derek Atkins <derek@ihtfp.com> Add the post_input processor
*
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#include "xfrm_hash.h"
/* Bounds for the policy hold-queue timer (used by xfrm_policy_queue_process —
 * TODO confirm against the queue-processing code further down the file).
 */
#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
/* Cap on packets parked on a policy's hold queue. */
#define XFRM_MAX_QUEUE_LEN 100

/* Bundle of lookup arguments: original route plus behavior flags. */
struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* Serializes writers of xfrm_policy_afinfo[]; readers go through RCU. */
static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
/* Per-address-family policy ops, indexed by family up to AF_INET6. */
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __read_mostly;
/* Bumped around each bydst hash resize so lockless readers can retry. */
static __read_mostly seqcount_t xfrm_policy_hash_generation;

/* Forward declarations for helpers defined later in this file. */
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(unsigned long arg);
static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);
/* Try to take a reference on @policy; fails (returns false) when the
 * refcount has already dropped to zero, i.e. the policy is being freed.
 */
static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	bool gained = atomic_inc_not_zero(&policy->refcnt);

	return gained;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi4 *fl4 = &fl->u.ip4;
return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
(fl4->flowi4_proto == sel->proto || !sel->proto) &&
(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 103 | 72.03% | 1 | 12.50% |
David S. Miller | 34 | 23.78% | 6 | 75.00% |
Alexey Dobriyan | 6 | 4.20% | 1 | 12.50% |
Total | 143 | 100.00% | 8 | 100.00% |
static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
const struct flowi6 *fl6 = &fl->u.ip6;
return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
(fl6->flowi6_proto == sel->proto || !sel->proto) &&
(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 109 | 76.22% | 1 | 14.29% |
David S. Miller | 34 | 23.78% | 6 | 85.71% |
Total | 143 | 100.00% | 7 | 100.00% |
/* Dispatch a selector match by address family; unknown families never
 * match.
 */
bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	if (family == AF_INET)
		return __xfrm4_selector_match(sel, fl);
	if (family == AF_INET6)
		return __xfrm6_selector_match(sel, fl);

	return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 38 | 73.08% | 2 | 33.33% |
Andrew Morton | 10 | 19.23% | 1 | 16.67% |
David S. Miller | 4 | 7.69% | 3 | 50.00% |
Total | 52 | 100.00% | 6 | 100.00% |
/* Look up the per-family policy ops for @family.
 *
 * NOTE the asymmetric locking: on success this returns with the RCU
 * read lock HELD — the caller must rcu_read_unlock() when done with the
 * returned pointer.  On failure (family out of range or no afinfo
 * registered) the lock is released here and NULL is returned.
 */
static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();	/* keep the lock only when handing out a pointer */
	return afinfo;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 55 | 90.16% | 1 | 50.00% |
Florian Westphal | 6 | 9.84% | 1 | 50.00% |
Total | 61 | 100.00% | 2 | 100.00% |
/* Resolve a route from @saddr to @daddr via the family-specific
 * dst_lookup() op.
 *
 * Returns the dst entry or an ERR_PTR; -EAFNOSUPPORT when no afinfo is
 * registered for @family.  The bare rcu_read_unlock() below balances
 * the read lock that xfrm_policy_get_afinfo() leaves held on success.
 */
static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
						  int tos, int oif,
						  const xfrm_address_t *saddr,
						  const xfrm_address_t *daddr,
						  int family)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);

	rcu_read_unlock();	/* taken in xfrm_policy_get_afinfo() */

	return dst;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 73 | 81.11% | 1 | 16.67% |
Alexey Dobriyan | 7 | 7.78% | 1 | 16.67% |
David Ahern | 5 | 5.56% | 1 | 16.67% |
Florian Westphal | 3 | 3.33% | 2 | 33.33% |
David S. Miller | 2 | 2.22% | 1 | 16.67% |
Total | 90 | 100.00% | 6 | 100.00% |
/* Route lookup for state @x, honoring care-of address overrides
 * (XFRM_TYPE_LOCAL_COADDR / XFRM_TYPE_REMOTE_COADDR).  On success the
 * addresses actually used are copied back into @prev_saddr/@prev_daddr.
 */
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *src = &x->props.saddr;
	xfrm_address_t *dst_addr = &x->id.daddr;
	struct dst_entry *route;

	/* A care-of address replaces one endpoint of the lookup. */
	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		src = x->coaddr;
		dst_addr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		src = prev_saddr;
		dst_addr = x->coaddr;
	}

	route = __xfrm_dst_lookup(net, tos, oif, src, dst_addr, family);
	if (IS_ERR(route))
		return route;

	/* Report back which addresses the route was built for. */
	if (src != prev_saddr)
		memcpy(prev_saddr, src, sizeof(*prev_saddr));
	if (dst_addr != prev_daddr)
		memcpy(prev_daddr, dst_addr, sizeof(*prev_daddr));

	return route;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 101 | 56.11% | 4 | 57.14% |
Hideaki Yoshifuji / 吉藤英明 | 62 | 34.44% | 1 | 14.29% |
Alexey Dobriyan | 12 | 6.67% | 1 | 14.29% |
David Ahern | 5 | 2.78% | 1 | 14.29% |
Total | 180 | 100.00% | 7 | 100.00% |
/* Convert a timeout in seconds to jiffies, clamped so the result stays
 * strictly below MAX_SCHEDULE_TIMEOUT and remains schedulable.
 */
static inline unsigned long make_jiffies(long secs)
{
	const long limit = (MAX_SCHEDULE_TIMEOUT - 1) / HZ;

	return secs >= limit ? MAX_SCHEDULE_TIMEOUT - 1 : secs * HZ;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 21 | 61.76% | 1 | 50.00% |
Kazunori Miyazawa | 13 | 38.24% | 1 | 50.00% |
Total | 34 | 100.00% | 2 | 100.00% |
/* Per-policy lifetime timer callback.
 *
 * Runs holding the reference taken when the timer was armed (dropped on
 * every exit path via xfrm_pol_put).  Hard add/use expiry deletes the
 * policy and notifies the key manager with hard == 1; soft expiry only
 * warns (hard == 0) and re-checks after XFRM_KM_TIMEOUT.  Otherwise the
 * timer is re-armed for the nearest pending deadline.
 */
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;	/* seconds until nearest deadline */
	int warn = 0;		/* set once any soft limit has expired */
	int dir;

	read_lock(&xp->lock);

	/* Policy already dead: nothing to do, just drop our reference. */
	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		/* Never used yet: fall back to creation time. */
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;	/* poll again shortly */
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);	/* soft notification */

	/* Re-arm for the nearest deadline.  mod_timer() returning 0 means
	 * the timer was not pending, so the timer needs its own reference.
	 */
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);	/* hard notification */
	xfrm_pol_put(xp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 240 | 62.66% | 6 | 42.86% |
Hideaki Yoshifuji / 吉藤英明 | 83 | 21.67% | 1 | 7.14% |
Alexey Kuznetsov | 27 | 7.05% | 1 | 7.14% |
David S. Miller | 17 | 4.44% | 1 | 7.14% |
Kazunori Miyazawa | 6 | 1.57% | 1 | 7.14% |
Jamal Hadi Salim | 4 | 1.04% | 1 | 7.14% |
Timo Teräs | 3 | 0.78% | 1 | 7.14% |
James Morris | 3 | 0.78% | 2 | 14.29% |
Total | 383 | 100.00% | 14 | 100.00% |
/* Flow-cache "get" callback: grab a reference on the cached policy, or
 * return NULL when the policy has already been marked dead.
 */
static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	if (unlikely(pol->walk.dead))
		return NULL;

	xfrm_pol_hold(pol);
	return flo;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Timo Teräs | 52 | 100.00% | 1 | 100.00% |
Total | 52 | 100.00% | 1 | 100.00% |
/* Flow-cache "check" callback: a cached policy is valid while it has
 * not been marked dead.
 */
static int xfrm_policy_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	return pol->walk.dead ? 0 : 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Timo Teräs | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
/* Flow-cache "delete" callback: drop the cache's policy reference. */
static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	xfrm_pol_put(pol);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Timo Teräs | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
/* Flow-cache callbacks tying cached entries to policy refcounting. */
static const struct flow_cache_ops xfrm_policy_fc_ops = {
	.get = xfrm_policy_flo_get,
	.check = xfrm_policy_flo_check,
	.delete = xfrm_policy_flo_delete,
};
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */
struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *pol = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (!pol)
		return NULL;

	write_pnet(&pol->xp_net, net);
	INIT_LIST_HEAD(&pol->walk.all);
	INIT_HLIST_NODE(&pol->bydst);
	INIT_HLIST_NODE(&pol->byidx);
	rwlock_init(&pol->lock);
	/* Caller starts with one reference. */
	atomic_set(&pol->refcnt, 1);
	skb_queue_head_init(&pol->polq.hold_queue);
	setup_timer(&pol->timer, xfrm_policy_timer,
		    (unsigned long)pol);
	setup_timer(&pol->polq.hold_timer, xfrm_policy_queue_process,
		    (unsigned long)pol);
	pol->flo.ops = &xfrm_policy_fc_ops;

	return pol;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexey Kuznetsov | 56 | 37.58% | 4 | 28.57% |
Steffen Klassert | 28 | 18.79% | 1 | 7.14% |
David S. Miller | 20 | 13.42% | 1 | 7.14% |
Timo Teräs | 16 | 10.74% | 2 | 14.29% |
Alexey Dobriyan | 15 | 10.07% | 1 | 7.14% |
Pavel Emelyanov | 5 | 3.36% | 1 | 7.14% |
Andrew Morton | 4 | 2.68% | 1 | 7.14% |
Herbert Xu | 3 | 2.01% | 1 | 7.14% |
Panagiotis Issaris | 1 | 0.67% | 1 | 7.14% |
Al Viro | 1 | 0.67% | 1 | 7.14% |
Total | 149 | 100.00% | 14 | 100.00% |
EXPORT_SYMBOL(xfrm_policy_alloc);
/* RCU callback that actually frees a policy after the grace period:
 * releases its security context, then the policy itself.
 */
static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *pol = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(pol->security);
	kfree(pol);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 38 | 100.00% | 1 | 100.00% |
Total | 38 | 100.00% | 1 | 100.00% |
/* Destroy xfrm_policy: descendant resources must be released to this moment. */
void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	/* Only policies already marked dead may reach destruction. */
	BUG_ON(!policy->walk.dead);

	/* A pending timer would hold its own reference, so reaching here
	 * with one still armed means the refcount is corrupt.
	 */
	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	/* Defer freeing past an RCU grace period for lockless readers. */
	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexey Kuznetsov | 29 | 54.72% | 2 | 22.22% |
Fan Du | 10 | 18.87% | 1 | 11.11% |
Eric Dumazet | 5 | 9.43% | 1 | 11.11% |
Kris Katterjohn | 3 | 5.66% | 1 | 11.11% |
Trent Jaeger | 2 | 3.77% | 1 | 11.11% |
Herbert Xu | 2 | 3.77% | 1 | 11.11% |
Paul Moore | 1 | 1.89% | 1 | 11.11% |
Américo Wang | 1 | 1.89% | 1 | 11.11% |
Total | 53 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(xfrm_policy_destroy);
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must be unlinked from lists to the moment.
 */
static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	/* Invalidate anything cached against the old generation. */
	atomic_inc(&policy->genid);

	/* Each armed timer holds a reference; drop it when we cancel. */
	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	/* Finally drop the caller's reference. */
	xfrm_pol_put(policy);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 29 | 39.19% | 3 | 33.33% |
Steffen Klassert | 26 | 35.14% | 2 | 22.22% |
Timo Teräs | 13 | 17.57% | 1 | 11.11% |
Alexey Kuznetsov | 4 | 5.41% | 1 | 11.11% |
Li RongQing | 1 | 1.35% | 1 | 11.11% |
Christoph Hellwig | 1 | 1.35% | 1 | 11.11% |
Total | 74 | 100.00% | 9 | 100.00% |
/* Upper bound (in buckets) for any policy hash table. */
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

/* Hash a policy index into the per-netns byidx table. */
static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 17 | 60.71% | 1 | 25.00% |
Alexey Dobriyan | 10 | 35.71% | 2 | 50.00% |
Masahide Nakamura | 1 | 3.57% | 1 | 25.00% |
Total | 28 | 100.00% | 4 | 100.00% |
/* calculate policy hash thresholds: fetch the per-direction prefix-length
 * thresholds (destination/source bits) for @family; zero for unknown
 * families.
 */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	if (family == AF_INET) {
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
	} else if (family == AF_INET6) {
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
	} else {
		*dbits = 0;
		*sbits = 0;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christophe Gouault | 107 | 100.00% | 1 | 100.00% |
Total | 107 | 100.00% | 1 | 100.00% |
/* Return the bydst hash chain for a policy with selector @sel, or the
 * inexact list when the selector cannot be hashed (__sel_hash() signals
 * this by returning hmask + 1).
 */
static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int mask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int bucket;
	u8 dbits, sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	bucket = __sel_hash(sel, family, mask, dbits, sbits);

	if (bucket == mask + 1)
		return &net->xfrm.policy_inexact[dir];

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
				     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + bucket;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 59 | 46.46% | 2 | 28.57% |
Christophe Gouault | 29 | 22.83% | 1 | 14.29% |
Alexey Dobriyan | 20 | 15.75% | 3 | 42.86% |
Florian Westphal | 19 | 14.96% | 1 | 14.29% |
Total | 127 | 100.00% | 7 | 100.00% |
/* Return the bydst hash chain for the address pair @daddr/@saddr in
 * direction @dir, applying the per-direction prefix thresholds.
 */
static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int mask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int bucket;
	u8 dbits, sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	bucket = __addr_hash(daddr, saddr, family, mask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
				     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + bucket;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 57 | 50.00% | 2 | 33.33% |
Christophe Gouault | 29 | 25.44% | 1 | 16.67% |
Alexey Dobriyan | 15 | 13.16% | 2 | 33.33% |
Florian Westphal | 13 | 11.40% | 1 | 16.67% |
Total | 114 | 100.00% | 6 | 100.00% |
/* Rehash every policy on @list into @ndsttable (mask @nhashmask).
 *
 * Entries that land in the same new bucket are kept in their original
 * relative order: the first moved entry anchors the bucket head and
 * later same-hash entries are chained behind it with
 * hlist_add_behind_rcu().  Entries hashing elsewhere are skipped on the
 * current pass and handled by a "redo" rescan of what remains on @list.
 * NOTE(review): order preservation presumably matters for policy
 * priority ties — confirm against the insertion code.
 */
static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;	/* hash of the bucket this pass is filling */
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			/* First moved entry of this pass anchors its bucket. */
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;	/* other bucket: later pass */
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		/* Entries for other buckets remain — scan again. */
		entry0 = NULL;
		goto redo;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 82 | 40.59% | 1 | 16.67% |
Hideaki Yoshifuji / 吉藤英明 | 67 | 33.17% | 1 | 16.67% |
Christophe Gouault | 35 | 17.33% | 1 | 16.67% |
Sasha Levin | 12 | 5.94% | 1 | 16.67% |
Florian Westphal | 4 | 1.98% | 1 | 16.67% |
Ken Helias | 2 | 0.99% | 1 | 16.67% |
Total | 202 | 100.00% | 6 | 100.00% |
/* Move every policy on @list into the new byidx table @nidxtable,
 * rehashing each index with the new mask.
 */
static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct xfrm_policy *pol;
	struct hlist_node *n;

	hlist_for_each_entry_safe(pol, n, list, byidx) {
		unsigned int bucket = __idx_hash(pol->index, nhashmask);

		hlist_add_head(&pol->byidx, nidxtable + bucket);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 69 | 100.00% | 1 | 100.00% |
Total | 69 | 100.00% | 1 | 100.00% |
/* Next hash mask after @old_hmask: double the table size (mask + 1)
 * and convert back to a mask, i.e. 2 * (old_hmask + 1) - 1.
 */
static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	unsigned int doubled = (old_hmask + 1) * 2;

	return doubled - 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
/* Double the per-direction bydst hash table and rehash all policies.
 *
 * Writers serialize on xfrm_policy_lock; lockless readers are covered
 * by RCU plus the xfrm_policy_hash_generation seqcount bumped around
 * the swap.  The old table is freed only after synchronize_rcu().
 *
 * Fix: the original fetched @odst with rcu_dereference_protected()
 * twice in a row — an identical, redundant statement; one lookup
 * suffices.
 */
static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;	/* allocation failed: keep the old table */

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
					 lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	/* Wait out readers still traversing the old table before freeing. */
	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 129 | 53.97% | 2 | 22.22% |
Florian Westphal | 78 | 32.64% | 3 | 33.33% |
Alexey Dobriyan | 20 | 8.37% | 2 | 22.22% |
Fan Du | 8 | 3.35% | 1 | 11.11% |
Christophe Gouault | 4 | 1.67% | 1 | 11.11% |
Total | 239 | 100.00% | 9 | 100.00% |
/* Double the byidx hash table and rehash every policy index into it.
 * The swap happens under xfrm_policy_lock; the old table is then freed.
 */
static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int old_mask = net->xfrm.policy_idx_hmask;
	unsigned int new_mask = xfrm_new_hash_mask(old_mask);
	unsigned int bytes = (new_mask + 1) * sizeof(struct hlist_head);
	struct hlist_head *old_tab = net->xfrm.policy_byidx;
	struct hlist_head *new_tab = xfrm_hash_alloc(bytes);
	int i;

	if (!new_tab)
		return;	/* allocation failed: keep the old table */

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = old_mask; i >= 0; i--)
		xfrm_idx_hash_transfer(old_tab + i, new_tab, new_mask);

	net->xfrm.policy_byidx = new_tab;
	net->xfrm.policy_idx_hmask = new_mask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(old_tab, (old_mask + 1) * sizeof(struct hlist_head));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 121 | 77.56% | 2 | 28.57% |
Alexey Dobriyan | 25 | 16.03% | 3 | 42.86% |
Fan Du | 8 | 5.13% | 1 | 14.29% |
Florian Westphal | 2 | 1.28% | 1 | 14.29% |
Total | 156 | 100.00% | 7 | 100.00% |
/* Accumulate the direction's policy count into @total (when non-NULL)
 * and report whether its bydst table should grow: more entries than
 * buckets, and still below the global hashmax cap.
 */
static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	return (hmask + 1) < xfrm_policy_hashmax && cnt > hmask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 61 | 80.26% | 1 | 25.00% |
Alexey Dobriyan | 15 | 19.74% | 3 | 75.00% |
Total | 76 | 100.00% | 4 | 100.00% |
/* Report whether the byidx table should grow for @total policies:
 * more entries than buckets, and still below the global hashmax cap.
 */
static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	return (hmask + 1) < xfrm_policy_hashmax && total > hmask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 35 | 77.78% | 1 | 33.33% |
Alexey Dobriyan | 10 | 22.22% | 2 | 66.67% |
Total | 45 | 100.00% | 3 | 100.00% |
void xfrm_spd_getinfo(