/* Release 4.11 net/xfrm/xfrm_state.c */
/*
* xfrm_state.c
*
* Changes:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro <kunihiro@ipinfusion.com>
* IPv6 support
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific functions
* Derek Atkins <derek@ihtfp.com>
* Add UDP Encapsulation
*
*/
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include "xfrm_hash.h"
#define xfrm_state_deref_prot(table, net) \
rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
static void xfrm_state_gc_task(struct work_struct *work);
/* Each xfrm_state may be linked to two tables:
1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
2. Hash table by (daddr,family,reqid) to find what SAs exist for given
destination/tunnel endpoint. (output)
*/
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);
static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
{
return atomic_inc_not_zero(&x->refcnt);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static inline unsigned int xfrm_dst_hash(struct net *net,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
u32 reqid,
unsigned short family)
{
return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 38 | 79.17% | 5 | 71.43% |
Alexey Dobriyan | 10 | 20.83% | 2 | 28.57% |
Total | 48 | 100.00% | 7 | 100.00% |
static inline unsigned int xfrm_src_hash(struct net *net,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
unsigned short family)
{
return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 24 | 55.81% | 4 | 50.00% |
Alexey Dobriyan | 10 | 23.26% | 2 | 25.00% |
Masahide Nakamura | 8 | 18.60% | 1 | 12.50% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 2.33% | 1 | 12.50% |
Total | 43 | 100.00% | 8 | 100.00% |
static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
__be32 spi, u8 proto, unsigned short family)
{
return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 35 | 76.09% | 4 | 57.14% |
Alexey Dobriyan | 10 | 21.74% | 2 | 28.57% |
Al Viro | 1 | 2.17% | 1 | 14.29% |
Total | 46 | 100.00% | 7 | 100.00% |
static void xfrm_hash_transfer(struct hlist_head *list,
struct hlist_head *ndsttable,
struct hlist_head *nsrctable,
struct hlist_head *nspitable,
unsigned int nhashmask)
{
struct hlist_node *tmp;
struct xfrm_state *x;
hlist_for_each_entry_safe(x, tmp, list, bydst) {
unsigned int h;
h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family,
nhashmask);
hlist_add_head_rcu(&x->bydst, ndsttable + h);
h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
x->props.family,
nhashmask);
hlist_add_head_rcu(&x->bysrc, nsrctable + h);
if (x->id.spi) {
h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
x->id.proto, x->props.family,
nhashmask);
hlist_add_head_rcu(&x->byspi, nspitable + h);
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 174 | 89.69% | 3 | 50.00% |
Masahide Nakamura | 17 | 8.76% | 2 | 33.33% |
Florian Westphal | 3 | 1.55% | 1 | 16.67% |
Total | 194 | 100.00% | 6 | 100.00% |
static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
{
return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 24 | 85.71% | 1 | 33.33% |
Alexey Dobriyan | 4 | 14.29% | 2 | 66.67% |
Total | 28 | 100.00% | 3 | 100.00% |
static void xfrm_hash_resize(struct work_struct *work)
{
struct net *net = container_of(work, struct net, xfrm.state_hash_work);
struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
unsigned long nsize, osize;
unsigned int nhashmask, ohashmask;
int i;
nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
ndst = xfrm_hash_alloc(nsize);
if (!ndst)
return;
nsrc = xfrm_hash_alloc(nsize);
if (!nsrc) {
xfrm_hash_free(ndst, nsize);
return;
}
nspi = xfrm_hash_alloc(nsize);
if (!nspi) {
xfrm_hash_free(ndst, nsize);
xfrm_hash_free(nsrc, nsize);
return;
}
spin_lock_bh(&net->xfrm.xfrm_state_lock);
write_seqcount_begin(&xfrm_state_hash_generation);
nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
for (i = net->xfrm.state_hmask; i >= 0; i--)
xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
ohashmask = net->xfrm.state_hmask;
rcu_assign_pointer(net->xfrm.state_bydst, ndst);
rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
rcu_assign_pointer(net->xfrm.state_byspi, nspi);
net->xfrm.state_hmask = nhashmask;
write_seqcount_end(&xfrm_state_hash_generation);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
osize = (ohashmask + 1) * sizeof(struct hlist_head);
synchronize_rcu();
xfrm_hash_free(odst, osize);
xfrm_hash_free(osrc, osize);
xfrm_hash_free(ospi, osize);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 213 | 62.28% | 3 | 21.43% |
Alexey Dobriyan | 65 | 19.01% | 5 | 35.71% |
Florian Westphal | 51 | 14.91% | 3 | 21.43% |
Fan Du | 8 | 2.34% | 1 | 7.14% |
Ying Xue | 3 | 0.88% | 1 | 7.14% |
David Howells | 2 | 0.58% | 1 | 7.14% |
Total | 342 | 100.00% | 14 | 100.00% |
static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
static DEFINE_SPINLOCK(xfrm_state_gc_lock);
int __xfrm_state_delete(struct xfrm_state *x);
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
static DEFINE_SPINLOCK(xfrm_type_lock);
int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
const struct xfrm_type **typemap;
int err = 0;
if (unlikely(afinfo == NULL))
return -EAFNOSUPPORT;
typemap = afinfo->type_map;
spin_lock_bh(&xfrm_type_lock);
if (likely(typemap[type->proto] == NULL))
typemap[type->proto] = type;
else
err = -EEXIST;
spin_unlock_bh(&xfrm_type_lock);
rcu_read_unlock();
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 86 | 83.50% | 1 | 25.00% |
Américo Wang | 13 | 12.62% | 1 | 25.00% |
Florian Westphal | 2 | 1.94% | 1 | 25.00% |
Eric Dumazet | 2 | 1.94% | 1 | 25.00% |
Total | 103 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(xfrm_register_type);
int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
const struct xfrm_type **typemap;
int err = 0;
if (unlikely(afinfo == NULL))
return -EAFNOSUPPORT;
typemap = afinfo->type_map;
spin_lock_bh(&xfrm_type_lock);
if (unlikely(typemap[type->proto] != type))
err = -ENOENT;
else
typemap[type->proto] = NULL;
spin_unlock_bh(&xfrm_type_lock);
rcu_read_unlock();
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 86 | 83.50% | 1 | 25.00% |
Américo Wang | 13 | 12.62% | 1 | 25.00% |
Florian Westphal | 2 | 1.94% | 1 | 25.00% |
Eric Dumazet | 2 | 1.94% | 1 | 25.00% |
Total | 103 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(xfrm_unregister_type);
static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
struct xfrm_state_afinfo *afinfo;
const struct xfrm_type **typemap;
const struct xfrm_type *type;
int modload_attempted = 0;
retry:
afinfo = xfrm_state_get_afinfo(family);
if (unlikely(afinfo == NULL))
return NULL;
typemap = afinfo->type_map;
type = READ_ONCE(typemap[proto]);
if (unlikely(type && !try_module_get(type->owner)))
type = NULL;
rcu_read_unlock();
if (!type && !modload_attempted) {
request_module("xfrm-type-%d-%d", family, proto);
modload_attempted = 1;
goto retry;
}
return type;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 118 | 92.91% | 1 | 33.33% |
Florian Westphal | 6 | 4.72% | 1 | 33.33% |
Eric Dumazet | 3 | 2.36% | 1 | 33.33% |
Total | 127 | 100.00% | 3 | 100.00% |
static void xfrm_put_type(const struct xfrm_type *type)
{
module_put(type->owner);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 18 | 94.74% | 1 | 50.00% |
Eric Dumazet | 1 | 5.26% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
static DEFINE_SPINLOCK(xfrm_mode_lock);
int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
struct xfrm_state_afinfo *afinfo;
struct xfrm_mode **modemap;
int err;
if (unlikely(mode->encap >= XFRM_MODE_MAX))
return -EINVAL;
afinfo = xfrm_state_get_afinfo(family);
if (unlikely(afinfo == NULL))
return -EAFNOSUPPORT;
err = -EEXIST;
modemap = afinfo->mode_map;
spin_lock_bh(&xfrm_mode_lock);
if (modemap[mode->encap])
goto out;
err = -ENOENT;
if (!try_module_get(afinfo->owner))
goto out;
mode->afinfo = afinfo;
modemap[mode->encap] = mode;
err = 0;
out:
spin_unlock_bh(&xfrm_mode_lock);
rcu_read_unlock();
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 127 | 89.44% | 2 | 50.00% |
Américo Wang | 13 | 9.15% | 1 | 25.00% |
Florian Westphal | 2 | 1.41% | 1 | 25.00% |
Total | 142 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(xfrm_register_mode);
int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
struct xfrm_state_afinfo *afinfo;
struct xfrm_mode **modemap;
int err;
if (unlikely(mode->encap >= XFRM_MODE_MAX))
return -EINVAL;
afinfo = xfrm_state_get_afinfo(family);
if (unlikely(afinfo == NULL))
return -EAFNOSUPPORT;
err = -ENOENT;
modemap = afinfo->mode_map;
spin_lock_bh(&xfrm_mode_lock);
if (likely(modemap[mode->encap] == mode)) {
modemap[mode->encap] = NULL;
module_put(mode->afinfo->owner);
err = 0;
}
spin_unlock_bh(&xfrm_mode_lock);
rcu_read_unlock();
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 114 | 88.37% | 2 | 50.00% |
Américo Wang | 13 | 10.08% | 1 | 25.00% |
Florian Westphal | 2 | 1.55% | 1 | 25.00% |
Total | 129 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(xfrm_unregister_mode);
static struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
struct xfrm_state_afinfo *afinfo;
struct xfrm_mode *mode;
int modload_attempted = 0;
if (unlikely(encap >= XFRM_MODE_MAX))
return NULL;
retry:
afinfo = xfrm_state_get_afinfo(family);
if (unlikely(afinfo == NULL))
return NULL;
mode = READ_ONCE(afinfo->mode_map[encap]);
if (unlikely(mode && !try_module_get(mode->owner)))
mode = NULL;
rcu_read_unlock();
if (!mode && !modload_attempted) {
request_module("xfrm-mode-%d-%d", family, encap);
modload_attempted = 1;
goto retry;
}
return mode;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 120 | 95.24% | 1 | 50.00% |
Florian Westphal | 6 | 4.76% | 1 | 50.00% |
Total | 126 | 100.00% | 2 | 100.00% |
static void xfrm_put_mode(struct xfrm_mode *mode)
{
module_put(mode->owner);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
tasklet_hrtimer_cancel(&x->mtimer);
del_timer_sync(&x->rtimer);
kfree(x->aead);
kfree(x->aalg);
kfree(x->ealg);
kfree(x->calg);
kfree(x->encap);
kfree(x->coaddr);
kfree(x->replay_esn);
kfree(x->preplay_esn);
if (x->inner_mode)
xfrm_put_mode(x->inner_mode);
if (x->inner_mode_iaf)
xfrm_put_mode(x->inner_mode_iaf);
if (x->outer_mode)
xfrm_put_mode(x->outer_mode);
if (x->type) {
x->type->destructor(x);
xfrm_put_type(x->type);
}
security_xfrm_state_free(x);
kfree(x);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
James Morris | 74 | 47.44% | 2 | 16.67% |
Herbert Xu | 26 | 16.67% | 2 | 16.67% |
Steffen Klassert | 14 | 8.97% | 1 | 8.33% |
Kazunori Miyazawa | 13 | 8.33% | 1 | 8.33% |
Ilan Tayari | 7 | 4.49% | 1 | 8.33% |
Noriaki Takamiya | 7 | 4.49% | 1 | 8.33% |
Jamal Hadi Salim | 7 | 4.49% | 1 | 8.33% |
Trent Jaeger | 5 | 3.21% | 1 | 8.33% |
Yury Polyanskiy | 2 | 1.28% | 1 | 8.33% |
David S. Miller | 1 | 0.64% | 1 | 8.33% |
Total | 156 | 100.00% | 12 | 100.00% |
static void xfrm_state_gc_task(struct work_struct *work)
{
struct xfrm_state *x;
struct hlist_node *tmp;
struct hlist_head gc_list;
spin_lock_bh(&xfrm_state_gc_lock);
hlist_move_list(&xfrm_state_gc_list, &gc_list);
spin_unlock_bh(&xfrm_state_gc_lock);
synchronize_rcu();
hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
xfrm_state_gc_destroy(x);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
James Morris | 41 | 64.06% | 1 | 12.50% |
Herbert Xu | 14 | 21.88% | 2 | 25.00% |
Florian Westphal | 4 | 6.25% | 2 | 25.00% |
David S. Miller | 2 | 3.12% | 1 | 12.50% |
David Howells | 2 | 3.12% | 1 | 12.50% |
Alexey Dobriyan | 1 | 1.56% | 1 | 12.50% |
Total | 64 | 100.00% | 8 | 100.00% |
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
unsigned long now = get_seconds();
long next = LONG_MAX;
int warn = 0;
int err = 0;
spin_lock(&x->lock);
if (x->km.state == XFRM_STATE_DEAD)
goto out;
if (x->km.state == XFRM_STATE_EXPIRED)
goto expired;
if (x->lft.hard_add_expires_seconds) {
long tmo = x->lft.hard_add_expires_seconds +
x->curlft.add_time - now;
if (tmo <= 0) {
if (x->xflags & XFRM_SOFT_EXPIRE) {
/* enter hard expire without soft expire first?!
* setting a new date could trigger this.
* workaround: fix x->curflt.add_time by below:
*/
x->curlft.add_time = now - x->saved_tmo - 1;
tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
} else
goto expired;
}
if (tmo < next)
next = tmo;
}
if (x->lft.hard_use_expires_seconds) {
long tmo = x->lft.hard_use_expires_seconds +
(x->curlft.use_time ? : now) - now;
if (tmo <= 0)
goto expired;
if (tmo < next)
next = tmo;
}
if (x->km.dying)
goto resched;
if (x->lft.soft_add_expires_seconds) {
long tmo = x->lft.soft_add_expires_seconds +
x->curlft.add_time - now;
if (tmo <= 0) {
warn = 1;
x->xflags &= ~XFRM_SOFT_EXPIRE;
} else if (tmo < next) {
next = tmo;
x->xflags |= XFRM_SOFT_EXPIRE;
x->saved_tmo = tmo;
}
}
if (x->lft.soft_use_expires_seconds) {
long tmo = x->lft.soft_use_expires_seconds +
(x->curlft.use_time ? : now) - now;
if (tmo <= 0)
warn = 1;
else if (tmo < next)
next = tmo;
}
x->km.dying = warn;
if (warn)
km_state_expired(x, 0, 0);
resched:
if (next != LONG_MAX) {
tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
}
goto out;
expired:
if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
x->km.state = XFRM_STATE_EXPIRED;
err = __xfrm_state_delete(x);
if (!err)
km_state_expired(x, 1, 0);
xfrm_audit_state_delete(x, err ?