Release 4.12: include/linux/netfilter.h
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
return -(verdict >> NF_VERDICT_QBITS);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Florian Westphal | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
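For reference, a verdict packs its code into the low bits and an errno payload into the high bits; NF_VERDICT_QBITS (16, from the uapi netfilter header) is the split point. A minimal sketch of the encoding side that NF_DROP_GETERR() reverses; encode_drop_err() is hypothetical and not part of this header:

static inline int encode_drop_err(int err)
{
        /* err is a negative errno, e.g. -EHOSTUNREACH; NF_DROP (0)
         * stays in the low verdict bits. */
        return ((-err) << NF_VERDICT_QBITS) | NF_DROP;
}

/* NF_DROP_GETERR(encode_drop_err(-EHOSTUNREACH)) == -EHOSTUNREACH */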
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
{
return a1->all[0] == a2->all[0] &&
a1->all[1] == a2->all[1] &&
a1->all[2] == a2->all[2] &&
a1->all[3] == a2->all[3];
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Patrick McHardy | 76 | 100.00% | 1 | 100.00% |
Total | 76 | 100.00% | 1 | 100.00% |
static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
union nf_inet_addr *result,
const union nf_inet_addr *mask)
{
result->all[0] = a1->all[0] & mask->all[0];
result->all[1] = a1->all[1] & mask->all[1];
result->all[2] = a1->all[2] & mask->all[2];
result->all[3] = a1->all[3] & mask->all[3];
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Denys Fedoryshchenko | 108 | 100.00% | 1 | 100.00% |
Total | 108 | 100.00% | 1 | 100.00% |
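The compare and mask helpers above combine naturally into the prefix-match test that address-matching code typically needs. A minimal sketch, assuming the hypothetical name is_in_network(); union nf_inet_addr itself comes from the uapi netfilter header:

static inline bool is_in_network(const union nf_inet_addr *addr,
                                 const union nf_inet_addr *net,
                                 const union nf_inet_addr *mask)
{
        union nf_inet_addr masked;

        /* (addr & mask) == net decides membership for v4 and v6 alike */
        nf_inet_addr_mask(addr, &masked, mask);
        return nf_inet_addr_cmp(&masked, net);
}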
int netfilter_init(void);
struct sk_buff;
struct nf_hook_ops;
struct sock;
struct nf_hook_state {
unsigned int hook;
u_int8_t pf;
struct net_device *in;
struct net_device *out;
struct sock *sk;
struct net *net;
int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};
typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
struct nf_hook_ops {
struct list_head list;
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
void *priv;
u_int8_t pf;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
};
struct nf_hook_entry {
struct nf_hook_entry __rcu *next;
nf_hookfn *hook;
void *priv;
const struct nf_hook_ops *orig_ops;
};
static inline void
nf_hook_entry_init(struct nf_hook_entry *entry, const struct nf_hook_ops *ops)
{
entry->next = NULL;
entry->hook = ops->hook;
entry->priv = ops->priv;
entry->orig_ops = ops;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Aaron Conole | 46 | 100.00% | 2 | 100.00% |
Total | 46 | 100.00% | 2 | 100.00% |
static inline int
nf_hook_entry_priority(const struct nf_hook_entry *entry)
{
return entry->orig_ops->priority;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Aaron Conole | 20 | 100.00% | 2 | 100.00% |
Total | 20 | 100.00% | 2 | 100.00% |
static inline int
nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
struct nf_hook_state *state)
{
return entry->hook(entry->priv, skb, state);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Aaron Conole | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
static inline const struct nf_hook_ops *
nf_hook_entry_ops(const struct nf_hook_entry *entry)
{
return entry->orig_ops;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Aaron Conole | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
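These accessors exist so nf_hook_slow() can walk the RCU-protected, singly linked entry chain without reaching into the struct. A heavily simplified, hedged sketch of that traversal (my_walk_chain() is hypothetical; the real loop in net/netfilter/core.c also dispatches NF_DROP, NF_QUEUE and NF_STOLEN):

static inline bool my_walk_chain(struct nf_hook_entry *entry,
                                 struct sk_buff *skb,
                                 struct nf_hook_state *state)
{
        for (; entry; entry = rcu_dereference(entry->next)) {
                if (nf_hook_entry_hookfn(entry, skb, state) != NF_ACCEPT)
                        return false;   /* dropped, stolen or queued */
        }
        return true;                    /* every hook accepted */
}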
static inline void nf_hook_state_init(struct nf_hook_state *p,
unsigned int hook,
u_int8_t pf,
struct net_device *indev,
struct net_device *outdev,
struct sock *sk,
struct net *net,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
p->hook = hook;
p->pf = pf;
p->in = indev;
p->out = outdev;
p->sk = sk;
p->net = net;
p->okfn = okfn;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
David S. Miller | 85 | 85.00% | 3 | 50.00% |
Eric W. Biederman | 13 | 13.00% | 2 | 33.33% |
Pablo Neira Ayuso | 2 | 2.00% | 1 | 16.67% |
Total | 100 | 100.00% | 6 | 100.00% |
struct nf_sockopt_ops {
struct list_head list;
u_int8_t pf;
/* Non-inclusive ranges: use 0/0/NULL to never get called. */
int set_optmin;
int set_optmax;
int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
#ifdef CONFIG_COMPAT
int (*compat_set)(struct sock *sk, int optval,
void __user *user, unsigned int len);
#endif
int get_optmin;
int get_optmax;
int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
int (*compat_get)(struct sock *sk, int optval,
void __user *user, int *len);
#endif
/* Use the module struct to lock set/get code in place */
struct module *owner;
};
/* Function to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int n);
int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
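A minimal, hedged sketch of the registration pattern these functions serve; my_hookfn and my_ops are hypothetical, and NF_IP_PRI_FIRST comes from linux/netfilter_ipv4.h rather than this header:

static unsigned int my_hookfn(void *priv, struct sk_buff *skb,
                              const struct nf_hook_state *state)
{
        /* inspect or mangle skb here; NF_ACCEPT lets it continue */
        return NF_ACCEPT;
}

static struct nf_hook_ops my_ops = {
        .hook     = my_hookfn,
        .pf       = NFPROTO_IPV4,
        .hooknum  = NF_INET_PRE_ROUTING,
        .priority = NF_IP_PRI_FIRST,
};

A module would call nf_register_net_hook(&init_net, &my_ops) in its init path and mirror it with nf_unregister_net_hook() on exit; the nf_register_hook() variant registers the same ops in every network namespace.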
/* Functions to register get/setsockopt ranges (non-inclusive). You
need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
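A hedged sketch of a sockopt user; MY_BASE, my_set and my_get are hypothetical, and since the ranges are non-inclusive, optmax is one past the last handled option:

static struct nf_sockopt_ops my_sockopts = {
        .pf         = PF_INET,
        .set_optmin = MY_BASE,
        .set_optmax = MY_BASE + 1,      /* exactly one set option */
        .set        = my_set,
        .get_optmin = MY_BASE,
        .get_optmax = MY_BASE + 1,
        .get        = my_get,
        .owner      = THIS_MODULE,
};

/* nf_register_sockopt(&my_sockopts) in init,
 * nf_unregister_sockopt(&my_sockopts) in exit. */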
#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
struct nf_hook_entry *entry);
/**
* nf_hook - call a netfilter hook
*
* Returns 1 if the hook has allowed the packet to pass. The function
* okfn must be invoked by the caller in this case. Any other return
* value indicates the packet has been consumed by the hook.
*/
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
struct sock *sk, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
struct nf_hook_entry *hook_head;
int ret = 1;
#ifdef HAVE_JUMP_LABEL
if (__builtin_constant_p(pf) &&
__builtin_constant_p(hook) &&
!static_key_false(&nf_hooks_needed[pf][hook]))
return 1;
#endif
rcu_read_lock();
hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
if (hook_head) {
struct nf_hook_state state;
nf_hook_state_init(&state, hook, pf, indev, outdev,
sk, net, okfn);
ret = nf_hook_slow(skb, &state, hook_head);
}
rcu_read_unlock();
return ret;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Florian Westphal | 44 | 26.04% | 2 | 11.11% |
David S. Miller | 35 | 20.71% | 4 | 22.22% |
Patrick McHardy | 33 | 19.53% | 1 | 5.56% |
Eric W. Biederman | 24 | 14.20% | 5 | 27.78% |
Aaron Conole | 16 | 9.47% | 1 | 5.56% |
Linus Torvalds | 12 | 7.10% | 1 | 5.56% |
Pablo Neira Ayuso | 3 | 1.78% | 2 | 11.11% |
Jan Engelhardt | 1 | 0.59% | 1 | 5.56% |
Herbert Xu | 1 | 0.59% | 1 | 5.56% |
Total | 169 | 100.00% | 18 | 100.00% |
/* Activate hook; either okfn or kfree_skb called, unless a hook
returns NF_STOLEN (in which case, it's up to the hook to deal with
the consequences).
Returns -ERRNO if packet dropped. Zero means queued, stolen or
accepted.
*/
/* RR:
> I don't want nf_hook to return anything because people might forget
> about async and trust the return value to mean "packet was ok".
AK:
Just document it clearly, then you can expect some sense from kernel
coders :)
*/
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
struct sk_buff *skb, struct net_device *in, struct net_device *out,
int (*okfn)(struct net *, struct sock *, struct sk_buff *),
bool cond)
{
int ret;
if (!cond ||
((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
ret = okfn(net, sk, skb);
return ret;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Jan Engelhardt | 66 | 60.00% | 1 | 11.11% |
Patrick McHardy | 15 | 13.64% | 2 | 22.22% |
David S. Miller | 13 | 11.82% | 1 | 11.11% |
Eric W. Biederman | 13 | 11.82% | 3 | 33.33% |
Eric Paris | 2 | 1.82% | 1 | 11.11% |
Pablo Neira Ayuso | 1 | 0.91% | 1 | 11.11% |
Total | 110 | 100.00% | 9 | 100.00% |
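The cond argument lets a caller skip the hook traversal when a flag shows the packet has already been through it. A representative call site (cf. ip_output() in net/ipv4/ip_output.c in this release), where rerouted packets bypass POST_ROUTING:

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                            net, sk, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));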
static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
if (ret == 1)
ret = okfn(net, sk, skb);
return ret;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Jan Engelhardt | 43 | 43.00% | 1 | 14.29% |
Pablo Neira Ayuso | 23 | 23.00% | 1 | 14.29% |
David S. Miller | 11 | 11.00% | 1 | 14.29% |
Eric W. Biederman | 11 | 11.00% | 2 | 28.57% |
Linus Torvalds (pre-git) | 10 | 10.00% | 1 | 14.29% |
Patrick McHardy | 2 | 2.00% | 1 | 14.29% |
Total | 100 | 100.00% | 7 | 100.00% |
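A representative entry point (cf. ip_rcv() in net/ipv4/ip_input.c in this release): the okfn, here ip_rcv_finish, runs only if every PRE_ROUTING hook returned NF_ACCEPT:

        return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
                       net, NULL, skb, dev, NULL,
                       ip_rcv_finish);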
/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
int *len);
#ifdef CONFIG_COMPAT
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
char __user *opt, unsigned int len);
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
char __user *opt, int *len);
#endif
/* Call this before modifying an existing packet: ensures it is
modifiable and linear to the point you care about (writable_len).
Returns true or false. */
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
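The typical call pattern in a target that rewrites headers, sketched here under the assumption of an IPv4/TCP packet (ip_hdrlen() and struct tcphdr come from net/ip.h and linux/tcp.h, not this header):

        if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
                return NF_DROP;
        /* the first writable_len bytes are now linear and private */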
struct flowi;
struct nf_queue_entry;
struct nf_afinfo {
unsigned short family;
__sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
__sum16 (*checksum_partial)(struct sk_buff *skb,
unsigned int hook,
unsigned int dataoff,
unsigned int len,
u_int8_t protocol);
int (*route)(struct net *net, struct dst_entry **dst,
struct flowi *fl, bool strict);
void (*saveroute)(const struct sk_buff *skb,
struct nf_queue_entry *entry);
int (*reroute)(struct net *net, struct sk_buff *skb,
const struct nf_queue_entry *entry);
int route_key_size;
};
extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
{
return rcu_dereference(nf_afinfo[family]);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Patrick McHardy | 21 | 91.30% | 2 | 66.67% |
Harald Welte | 2 | 8.70% | 1 | 33.33% |
Total | 23 | 100.00% | 3 | 100.00% |
static inline __sum16
nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
u_int8_t protocol, unsigned short family)
{
const struct nf_afinfo *afinfo;
__sum16 csum = 0;
rcu_read_lock();
afinfo = nf_get_afinfo(family);
if (afinfo)
csum = afinfo->checksum(skb, hook, dataoff, protocol);
rcu_read_unlock();
return csum;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Patrick McHardy | 71 | 97.26% | 2 | 66.67% |
Al Viro | 2 | 2.74% | 1 | 33.33% |
Total | 73 | 100.00% | 3 | 100.00% |
static inline __sum16
nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u_int8_t protocol, unsigned short family)
{
const struct nf_afinfo *afinfo;
__sum16 csum = 0;
rcu_read_lock();
afinfo = nf_get_afinfo(family);
if (afinfo)
csum = afinfo->checksum_partial(skb, hook, dataoff, len,
protocol);
rcu_read_unlock();
return csum;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Patrick McHardy | 79 | 100.00% | 1 | 100.00% |
Total | 79 | 100.00% | 1 | 100.00% |
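A hedged sketch of the validation these wrappers support, e.g. in conntrack protocol trackers; my_l4_csum_ok() is hypothetical (IPPROTO_TCP comes from the in.h header included above):

static inline bool my_l4_csum_ok(struct sk_buff *skb, unsigned int hook,
                                 unsigned int dataoff, u_int8_t family)
{
        /* a non-zero return from nf_checksum() means a bad checksum */
        return nf_checksum(skb, hook, dataoff, IPPROTO_TCP, family) == 0;
}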
int nf_register_afinfo(const struct nf_afinfo *afinfo);
void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
#include <net/flow.h>
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
void (*decodefn)(struct sk_buff *, struct flowi *);
rcu_read_lock();
decodefn = rcu_dereference(nf_nat_decode_session_hook);
if (decodefn)
decodefn(skb, fl);
rcu_read_unlock();
#endif
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Patrick McHardy | 62 | 96.88% | 3 | 60.00% |
Jan Engelhardt | 1 | 1.56% | 1 | 20.00% |
Jozsef Kadlecsik | 1 | 1.56% | 1 | 20.00% |
Total | 64 | 100.00% | 5 | 100.00% |
#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
struct sk_buff *skb, struct net_device *in, struct net_device *out,
int (*okfn)(struct net *, struct sock *, struct sk_buff *),
bool cond)
{
return okfn(net, sk, skb);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Arnd Bergmann | 56 | 78.87% | 1 | 25.00% |
Linus Torvalds (pre-git) | 11 | 15.49% | 1 | 25.00% |
Eric W. Biederman | 2 | 2.82% | 1 | 25.00% |
David S. Miller | 2 | 2.82% | 1 | 25.00% |
Total | 71 | 100.00% | 4 | 100.00% |
static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
struct sk_buff *skb, struct net_device *in, struct net_device *out,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
return okfn(net, sk, skb);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Arnd Bergmann | 52 | 76.47% | 1 | 25.00% |
Patrick McHardy | 12 | 17.65% | 1 | 25.00% |
David S. Miller | 2 | 2.94% | 1 | 25.00% |
Eric W. Biederman | 2 | 2.94% | 1 | 25.00% |
Total | 68 | 100.00% | 4 | 100.00% |
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
struct sock *sk, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
return 1;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
David S. Miller | 49 | 80.33% | 2 | 28.57% |
Eric W. Biederman | 9 | 14.75% | 2 | 28.57% |
Patrick McHardy | 1 | 1.64% | 1 | 14.29% |
Herbert Xu | 1 | 1.64% | 1 | 14.29% |
Jan Engelhardt | 1 | 1.64% | 1 | 14.29% |
Total | 61 | 100.00% | 7 | 100.00% |
struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Patrick McHardy | 17 | 89.47% | 1 | 50.00% |
Jan Engelhardt | 2 | 10.53% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <linux/netfilter/nf_conntrack_zones_common.h>
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Pablo Neira Ayuso | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
#endif
struct nf_conn;
enum ip_conntrack_info;
struct nlattr;
struct nfnl_ct_hook {
struct nf_conn *(*get_ct)(const struct sk_buff *skb,
enum ip_conntrack_info *ctinfo);
size_t (*build_size)(const struct nf_conn *ct);
int (*build)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
u_int16_t ct_attr, u_int16_t ct_info_attr);
int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
u32 portid, u32 report);
void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, s32 off);
};
extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
/**
* nf_skb_duplicated - TEE target has sent a packet
*
* When an xtables target sends a packet, the OUTPUT and POSTROUTING
* hooks are traversed again, i.e. nft and xtables are invoked recursively.
*
* This is used by the xtables TEE target to prevent the duplicated skb
* from being duplicated again.
*/
DECLARE_PER_CPU(bool, nf_skb_duplicated);
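A hedged sketch of the guard pattern (cf. nf_dup_ipv4() in net/netfilter/nf_dup_ipv4.c in this release): the per-cpu flag is raised around the re-injection so the recursive hook traversal can recognize the duplicate:

        if (this_cpu_read(nf_skb_duplicated))
                return;         /* already a duplicate: stop the recursion */

        /* clone and retarget the skb, then send it with the flag set */
        __this_cpu_write(nf_skb_duplicated, true);
        ip_local_out(net, skb->sk, skb);
        __this_cpu_write(nf_skb_duplicated, false);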
#endif /*__LINUX_NETFILTER_H*/
Overall Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Patrick McHardy | 560 | 24.09% | 20 | 22.73% |
David S. Miller | 249 | 10.71% | 6 | 6.82% |
Aaron Conole | 220 | 9.46% | 3 | 3.41% |
Linus Torvalds (pre-git) | 220 | 9.46% | 5 | 5.68% |
Pablo Neira Ayuso | 196 | 8.43% | 11 | 12.50% |
Eric W. Biederman | 162 | 6.97% | 7 | 7.95% |
Jan Engelhardt | 126 | 5.42% | 4 | 4.55% |
Arnd Bergmann | 108 | 4.65% | 1 | 1.14% |
Denys Fedoryshchenko | 108 | 4.65% | 1 | 1.14% |
Dmitry Mishin | 93 | 4.00% | 1 | 1.14% |
Florian Westphal | 78 | 3.35% | 6 | 6.82% |
Harald Welte | 61 | 2.62% | 2 | 2.27% |
Eric Dumazet | 30 | 1.29% | 2 | 2.27% |
Yasuyuki Kozakai | 26 | 1.12% | 2 | 2.27% |
Mahesh Bandewar | 26 | 1.12% | 1 | 1.14% |
Alexey Dobriyan | 15 | 0.65% | 1 | 1.14% |
Linus Torvalds | 12 | 0.52% | 1 | 1.14% |
Al Viro | 7 | 0.30% | 2 | 2.27% |
Zhouyi Zhou | 6 | 0.26% | 1 | 1.14% |
Herbert Xu | 4 | 0.17% | 2 | 2.27% |
Ken-ichirou MATSUZAWA | 4 | 0.17% | 2 | 2.27% |
Daniel Borkmann | 3 | 0.13% | 1 | 1.14% |
Neil Horman | 3 | 0.13% | 1 | 1.14% |
Bart De Schuymer | 2 | 0.09% | 1 | 1.14% |
Hideaki Yoshifuji / 吉藤英明 | 2 | 0.09% | 1 | 1.14% |
Eric Paris | 2 | 0.09% | 1 | 1.14% |
Jozsef Kadlecsik | 1 | 0.04% | 1 | 1.14% |
Ingo Molnar | 1 | 0.04% | 1 | 1.14% |
Total | 2325 | 100.00% | 88 | 100.00% |