cregit-Linux: how code gets into the kernel

Release 4.16 include/linux/netfilter.h

Directory: include/linux
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#ifdef CONFIG_NETFILTER

static inline int NF_DROP_GETERR(int verdict)
{
        return -(verdict >> NF_VERDICT_QBITS);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Florian Westphal | 18 | 100.00% | 1 | 100.00%
Total | 18 | 100.00% | 1 | 100.00%


static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
                                   const union nf_inet_addr *a2)
{
        return a1->all[0] == a2->all[0] &&
               a1->all[1] == a2->all[1] &&
               a1->all[2] == a2->all[2] &&
               a1->all[3] == a2->all[3];
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Patrick McHardy | 76 | 100.00% | 1 | 100.00%
Total | 76 | 100.00% | 1 | 100.00%


static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
                                     union nf_inet_addr *result,
                                     const union nf_inet_addr *mask)
{
        result->all[0] = a1->all[0] & mask->all[0];
        result->all[1] = a1->all[1] & mask->all[1];
        result->all[2] = a1->all[2] & mask->all[2];
        result->all[3] = a1->all[3] & mask->all[3];
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Denys Fedoryshchenko | 108 | 100.00% | 1 | 100.00%
Total | 108 | 100.00% | 1 | 100.00%

int netfilter_init(void);

struct sk_buff;

struct nf_hook_ops;
struct sock;

struct nf_hook_state {
        unsigned int hook;
        u_int8_t pf;
        struct net_device *in;
        struct net_device *out;
        struct sock *sk;
        struct net *net;
        int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};

typedef unsigned int nf_hookfn(void *priv,
                               struct sk_buff *skb,
                               const struct nf_hook_state *state);

struct nf_hook_ops {
        /* User fills in from here down. */
        nf_hookfn               *hook;
        struct net_device       *dev;
        void                    *priv;
        u_int8_t                pf;
        bool                    nat_hook;
        unsigned int            hooknum;
        /* Hooks are ordered in ascending priority. */
        int                     priority;
};

struct nf_hook_entry {
        nf_hookfn               *hook;
        void                    *priv;
};

struct nf_hook_entries_rcu_head {
        struct rcu_head head;
        void    *allocation;
};

struct nf_hook_entries {
        u16                     num_hook_entries;
        /* padding */
        struct nf_hook_entry    hooks[];

        /* trailer: pointers to original orig_ops of each hook,
         * followed by rcu_head and scratch space used for freeing
         * the structure via call_rcu.
         *
         * This is not part of struct nf_hook_entry since its only
         * needed in slow path (hook register/unregister):
         * const struct nf_hook_ops     *orig_ops[]
         *
         * For the same reason, we store this at end -- its
         * only needed when a hook is deleted, not during
         * packet path processing:
         * struct nf_hook_entries_rcu_head     head
         */
};
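
As an aside, and not as part of the header itself, the sketch below shows how this API is typically consumed: a module fills in a struct nf_hook_ops and registers it with nf_register_net_hook(), which is declared further down in this file. The module name, function names, hook point and priority here are hypothetical.

/* Illustrative sketch only -- not part of include/linux/netfilter.h.
 * A hypothetical module that accepts every packet it sees at the
 * IPv4 PRE_ROUTING hook.
 */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

static unsigned int example_hookfn(void *priv, struct sk_buff *skb,
                                   const struct nf_hook_state *state)
{
        return NF_ACCEPT;       /* let the packet continue */
}

static struct nf_hook_ops example_ops = {
        .hook     = example_hookfn,
        .pf       = NFPROTO_IPV4,
        .hooknum  = NF_INET_PRE_ROUTING,
        .priority = NF_IP_PRI_FIRST,    /* run before other hooks */
};

static int __init example_init(void)
{
        return nf_register_net_hook(&init_net, &example_ops);
}

static void __exit example_exit(void)
{
        nf_unregister_net_hook(&init_net, &example_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
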
static inline struct nf_hook_ops **
nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
{
        unsigned int n = e->num_hook_entries;
        const void *hook_end;

        hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */

        return (struct nf_hook_ops **)hook_end;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Aaron Conole | 49 | 100.00% | 3 | 100.00%
Total | 49 | 100.00% | 3 | 100.00%


static inline int
nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
                     struct nf_hook_state *state)
{
        return entry->hook(entry->priv, skb, state);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Aaron Conole | 37 | 100.00% | 1 | 100.00%
Total | 37 | 100.00% | 1 | 100.00%
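
For context, nf_hook_entry_hookfn() above is the helper the netfilter core uses while walking an nf_hook_entries array. The loop below is a simplified sketch of that traversal, loosely modelled on nf_hook_slow() in net/netfilter/core.c; it is not part of this header, the function name is made up, and NF_QUEUE/NF_STOLEN bookkeeping is omitted.

/* Simplified sketch: run each registered hook in priority order until
 * one of them takes the packet away or all of them accept it.
 */
static int example_run_hooks(struct sk_buff *skb, struct nf_hook_state *state,
                             const struct nf_hook_entries *e)
{
        unsigned int i;
        int verdict;

        for (i = 0; i < e->num_hook_entries; i++) {
                verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, state);
                switch (verdict & NF_VERDICT_MASK) {
                case NF_ACCEPT:
                        break;                  /* ask the next hook */
                case NF_DROP:
                        kfree_skb(skb);
                        return NF_DROP_GETERR(verdict) ?: -EPERM;
                default:                        /* NF_QUEUE, NF_STOLEN, ... */
                        return 0;
                }
        }
        return 1;                               /* every hook accepted it */
}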


static inline void nf_hook_state_init(struct nf_hook_state *p,
                                      unsigned int hook,
                                      u_int8_t pf,
                                      struct net_device *indev,
                                      struct net_device *outdev,
                                      struct sock *sk,
                                      struct net *net,
                                      int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        p->hook = hook;
        p->pf = pf;
        p->in = indev;
        p->out = outdev;
        p->sk = sk;
        p->net = net;
        p->okfn = okfn;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
David S. Miller | 85 | 85.00% | 3 | 50.00%
Eric W. Biedermann | 13 | 13.00% | 2 | 33.33%
Pablo Neira Ayuso | 2 | 2.00% | 1 | 16.67%
Total | 100 | 100.00% | 6 | 100.00%

struct nf_sockopt_ops {
        struct list_head list;

        u_int8_t pf;

        /* Non-inclusive ranges: use 0/0/NULL to never get called. */
        int set_optmin;
        int set_optmax;
        int (*set)(struct sock *sk, int optval, void __user *user,
                   unsigned int len);
#ifdef CONFIG_COMPAT
        int (*compat_set)(struct sock *sk, int optval,
                          void __user *user, unsigned int len);
#endif
        int get_optmin;
        int get_optmax;
        int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
        int (*compat_get)(struct sock *sk, int optval,
                          void __user *user, int *len);
#endif
        /* Use the module struct to lock set/get code in place */
        struct module *owner;
};

/* Function to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                          unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
                             unsigned int n);

/* Functions to register get/setsockopt ranges (non-inclusive).  You
   need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);

#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif

int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
                 const struct nf_hook_entries *e, unsigned int i);

/**
 * nf_hook - call a netfilter hook
 *
 * Returns 1 if the hook has allowed the packet to pass.  The function
 * okfn must be invoked by the caller in this case.  Any other return
 * value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
                          int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        struct nf_hook_entries *hook_head = NULL;
        int ret = 1;

#ifdef HAVE_JUMP_LABEL
        if (__builtin_constant_p(pf) &&
            __builtin_constant_p(hook) &&
            !static_key_false(&nf_hooks_needed[pf][hook]))
                return 1;
#endif

        rcu_read_lock();
        switch (pf) {
        case NFPROTO_IPV4:
                hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
                break;
        case NFPROTO_IPV6:
                hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
                break;
        case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
                hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
                break;
        case NFPROTO_BRIDGE:
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
                hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
                break;
#if IS_ENABLED(CONFIG_DECNET)
        case NFPROTO_DECNET:
                hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
                break;
#endif
        default:
                WARN_ON_ONCE(1);
                break;
        }

        if (hook_head) {
                struct nf_hook_state state;

                nf_hook_state_init(&state, hook, pf, indev, outdev,
                                   sk, net, okfn);

                ret = nf_hook_slow(skb, &state, hook_head, 0);
        }
        rcu_read_unlock();

        return ret;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Florian Westphal | 152 | 54.87% | 5 | 22.73%
David S. Miller | 35 | 12.64% | 4 | 18.18%
Patrick McHardy | 33 | 11.91% | 1 | 4.55%
Eric W. Biedermann | 22 | 7.94% | 5 | 22.73%
Aaron Conole | 18 | 6.50% | 2 | 9.09%
Linus Torvalds | 12 | 4.33% | 1 | 4.55%
Pablo Neira Ayuso | 3 | 1.08% | 2 | 9.09%
Jan Engelhardt | 1 | 0.36% | 1 | 4.55%
Herbert Xu | 1 | 0.36% | 1 | 4.55%
Total | 277 | 100.00% | 22 | 100.00%

/* Activate hook; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense
   from kernel coders :)
*/
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct sk_buff *skb, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *),
             bool cond)
{
        int ret;

        if (!cond ||
            ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
                ret = okfn(net, sk, skb);
        return ret;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Jan Engelhardt | 66 | 60.00% | 1 | 11.11%
Patrick McHardy | 15 | 13.64% | 2 | 22.22%
Eric W. Biedermann | 13 | 11.82% | 3 | 33.33%
David S. Miller | 13 | 11.82% | 1 | 11.11%
Eric Paris | 2 | 1.82% | 1 | 11.11%
Pablo Neira Ayuso | 1 | 0.91% | 1 | 11.11%
Total | 110 | 100.00% | 9 | 100.00%


static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
        struct sk_buff *skb, struct net_device *in, struct net_device *out,
        int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);

        if (ret == 1)
                ret = okfn(net, sk, skb);
        return ret;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Jan Engelhardt | 43 | 43.00% | 1 | 14.29%
Pablo Neira Ayuso | 23 | 23.00% | 1 | 14.29%
David S. Miller | 11 | 11.00% | 1 | 14.29%
Eric W. Biedermann | 11 | 11.00% | 2 | 28.57%
Linus Torvalds (pre-git) | 10 | 10.00% | 1 | 14.29%
Patrick McHardy | 2 | 2.00% | 1 | 14.29%
Total | 100 | 100.00% | 7 | 100.00%
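
For context, protocol entry points are the main callers of NF_HOOK(): they push the packet through the relevant hook point and pass the next processing step in as okfn. The call below is an illustration modelled on the IPv4 receive path and is not part of this header; ip_rcv_finish stands in for whatever continuation the caller uses.

/* Illustrative call site: the skb traverses the NF_INET_PRE_ROUTING
 * hooks and, if they all accept it, the continuation is invoked.
 */
return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
               dev_net(dev), NULL, skb, dev, NULL,
               ip_rcv_finish);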

/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  int *len);

#ifdef CONFIG_COMPAT
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
                         char __user *opt, unsigned int len);
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
                         char __user *opt, int *len);
#endif

/* Call this before modifying an existing packet: ensures it is
   modifiable and linear to the point you care about (writable_len).
   Returns true or false. */
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);

struct flowi;
struct nf_queue_entry;

__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
                    unsigned int dataoff, u_int8_t protocol,
                    unsigned short family);
__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
                            unsigned int dataoff, unsigned int len,
                            u_int8_t protocol, unsigned short family);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
             bool strict, unsigned short family);
int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);

#include <net/flow.h>

extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
        void (*decodefn)(struct sk_buff *, struct flowi *);

        rcu_read_lock();
        decodefn = rcu_dereference(nf_nat_decode_session_hook);
        if (decodefn)
                decodefn(skb, fl);
        rcu_read_unlock();
#endif
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Patrick McHardy | 62 | 96.88% | 3 | 60.00%
Jozsef Kadlecsik | 1 | 1.56% | 1 | 20.00%
Jan Engelhardt | 1 | 1.56% | 1 | 20.00%
Total | 64 | 100.00% | 5 | 100.00%

#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
             struct sk_buff *skb, struct net_device *in, struct net_device *out,
             int (*okfn)(struct net *, struct sock *, struct sk_buff *),
             bool cond)
{
        return okfn(net, sk, skb);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Arnd Bergmann | 56 | 78.87% | 1 | 25.00%
Linus Torvalds (pre-git) | 11 | 15.49% | 1 | 25.00%
Eric W. Biedermann | 2 | 2.82% | 1 | 25.00%
David S. Miller | 2 | 2.82% | 1 | 25.00%
Total | 71 | 100.00% | 4 | 100.00%


static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
        struct sk_buff *skb, struct net_device *in, struct net_device *out,
        int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return okfn(net, sk, skb);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Arnd Bergmann | 52 | 76.47% | 1 | 25.00%
Patrick McHardy | 12 | 17.65% | 1 | 25.00%
David S. Miller | 2 | 2.94% | 1 | 25.00%
Eric W. Biedermann | 2 | 2.94% | 1 | 25.00%
Total | 68 | 100.00% | 4 | 100.00%


static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                          struct sock *sk, struct sk_buff *skb,
                          struct net_device *indev, struct net_device *outdev,
                          int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
        return 1;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
David S. Miller | 49 | 80.33% | 2 | 28.57%
Eric W. Biedermann | 9 | 14.75% | 2 | 28.57%
Patrick McHardy | 1 | 1.64% | 1 | 14.29%
Herbert Xu | 1 | 1.64% | 1 | 14.29%
Jan Engelhardt | 1 | 1.64% | 1 | 14.29%
Total | 61 | 100.00% | 7 | 100.00%

struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Patrick McHardy | 17 | 89.47% | 1 | 50.00%
Jan Engelhardt | 2 | 10.53% | 1 | 50.00%
Total | 19 | 100.00% | 2 | 100.00%

#endif /*CONFIG_NETFILTER*/

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <linux/netfilter/nf_conntrack_zones_common.h>

extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Pablo Neira Ayuso | 16 | 100.00% | 1 | 100.00%
Total | 16 | 100.00% | 1 | 100.00%

#endif

struct nf_conn;
enum ip_conntrack_info;
struct nlattr;

struct nfnl_ct_hook {
        struct nf_conn *(*get_ct)(const struct sk_buff *skb,
                                  enum ip_conntrack_info *ctinfo);
        size_t (*build_size)(const struct nf_conn *ct);
        int (*build)(struct sk_buff *skb, struct nf_conn *ct,
                     enum ip_conntrack_info ctinfo,
                     u_int16_t ct_attr, u_int16_t ct_info_attr);
        int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
        int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
                             u32 portid, u32 report);
        void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo, s32 off);
};
extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;

/**
 * nf_skb_duplicated - TEE target has sent a packet
 *
 * When a xtables target sends a packet, the OUTPUT and POSTROUTING
 * hooks are traversed again, i.e. nft and xtables are invoked recursively.
 *
 * This is used by xtables TEE target to prevent the duplicated skb from
 * being duplicated again.
 */
DECLARE_PER_CPU(bool, nf_skb_duplicated);

#endif /*__LINUX_NETFILTER_H*/
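
As a usage note that is not part of the header: nfnl_ct_hook is provided by the ctnetlink module, so consumers such as nfnetlink_queue must dereference it under RCU and tolerate it being NULL before calling any of its callbacks. A hedged sketch of that pattern, with a hypothetical function name:

/* Illustrative consumer sketch: look up the conntrack entry for an skb
 * and ask how much netlink attribute space it would need, but only if
 * the ctnetlink hook is actually loaded.
 */
static size_t example_ct_size(const struct sk_buff *skb)
{
        struct nfnl_ct_hook *nfnl_ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        size_t size = 0;

        rcu_read_lock();
        nfnl_ct = rcu_dereference(nfnl_ct_hook);
        if (nfnl_ct) {
                ct = nfnl_ct->get_ct(skb, &ctinfo);
                if (ct)
                        size = nfnl_ct->build_size(ct);
        }
        rcu_read_unlock();
        return size;
}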

Overall Contributors

Person | Tokens | Prop | Commits | CommitProp
Patrick McHardy | 314 | 14.92% | 17 | 18.68%
David S. Miller | 249 | 11.83% | 6 | 6.59%
Pablo Neira Ayuso | 236 | 11.22% | 15 | 16.48%
Linus Torvalds (pre-git) | 203 | 9.65% | 5 | 5.49%
Florian Westphal | 195 | 9.27% | 9 | 9.89%
Aaron Conole | 189 | 8.98% | 4 | 4.40%
Eric W. Biedermann | 154 | 7.32% | 6 | 6.59%
Jan Engelhardt | 125 | 5.94% | 3 | 3.30%
Arnd Bergmann | 108 | 5.13% | 1 | 1.10%
Denys Fedoryshchenko | 108 | 5.13% | 1 | 1.10%
Dmitry Mishin | 93 | 4.42% | 1 | 1.10%
Eric Dumazet | 29 | 1.38% | 2 | 2.20%
Yasuyuki Kozakai | 26 | 1.24% | 2 | 2.20%
Alexey Dobriyan | 15 | 0.71% | 1 | 1.10%
Harald Welte | 14 | 0.67% | 2 | 2.20%
Linus Torvalds | 12 | 0.57% | 1 | 1.10%
Zhouyi Zhou | 6 | 0.29% | 1 | 1.10%
Al Viro | 5 | 0.24% | 2 | 2.20%
Ken-ichirou MATSUZAWA | 4 | 0.19% | 2 | 2.20%
Herbert Xu | 4 | 0.19% | 2 | 2.20%
Daniel Borkmann | 3 | 0.14% | 1 | 1.10%
Neil Horman | 3 | 0.14% | 1 | 1.10%
Eric Paris | 2 | 0.10% | 1 | 1.10%
Bart De Schuymer | 2 | 0.10% | 1 | 1.10%
Hideaki Yoshifuji / 吉藤英明 | 2 | 0.10% | 1 | 1.10%
Jozsef Kadlecsik | 1 | 0.05% | 1 | 1.10%
Greg Kroah-Hartman | 1 | 0.05% | 1 | 1.10%
Ingo Molnar | 1 | 0.05% | 1 | 1.10%
Total | 2104 | 100.00% | 91 | 100.00%
Created with cregit.