Contributors: 21
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Yasuyuki Kozakai |
127 |
30.53% |
1 |
2.38% |
Lorenzo Bianconi |
72 |
17.31% |
2 |
4.76% |
Pablo Neira Ayuso |
34 |
8.17% |
3 |
7.14% |
Florian Westphal |
34 |
8.17% |
6 |
14.29% |
Gao Feng |
25 |
6.01% |
3 |
7.14% |
Alexey Dobriyan |
15 |
3.61% |
3 |
7.14% |
Martin Josefsson |
14 |
3.37% |
3 |
7.14% |
Linus Torvalds (pre-git) |
14 |
3.37% |
4 |
9.52% |
Kumar Kartikeya Dwivedi |
12 |
2.88% |
1 |
2.38% |
Jesper Dangaard Brouer |
12 |
2.88% |
2 |
4.76% |
Tzung-Bi Shih |
12 |
2.88% |
1 |
2.38% |
Vladimir Davydov |
9 |
2.16% |
1 |
2.38% |
Patrick McHardy |
8 |
1.92% |
3 |
7.14% |
Sasha Levin |
8 |
1.92% |
1 |
2.38% |
Harald Welte |
6 |
1.44% |
1 |
2.38% |
Herbert Xu |
5 |
1.20% |
1 |
2.38% |
Daniel Borkmann |
4 |
0.96% |
1 |
2.38% |
Jan Engelhardt |
2 |
0.48% |
2 |
4.76% |
Joe Perches |
1 |
0.24% |
1 |
2.38% |
Jeremy Sowden |
1 |
0.24% |
1 |
2.38% |
Greg Kroah-Hartman |
1 |
0.24% |
1 |
2.38% |
Total |
416 |
|
42 |
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This header is used to share core functionality between the
* standalone connection tracking module, and the compatibility layer's use
* of connection tracking.
*
* 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
* - generalize L3 protocol dependent part.
*
* Derived from include/linux/netfilter_ipv4/ip_conntrack_core.h
*/
#ifndef _NF_CONNTRACK_CORE_H
#define _NF_CONNTRACK_CORE_H
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
/* This header is used to share core functionality between the
standalone connection tracking module, and the compatibility layer's use
of connection tracking. */

/* Conntrack hook entry point: processes @skb under the given hook state. */
unsigned int nf_conntrack_in(struct sk_buff *skb,
const struct nf_hook_state *state);

/* Per-network-namespace setup and teardown. */
int nf_conntrack_init_net(struct net *net);
void nf_conntrack_cleanup_net(struct net *net);
void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);

/* L4 protocol tracker init/teardown (global and per-netns). */
void nf_conntrack_proto_pernet_init(struct net *net);
int nf_conntrack_proto_init(void);
void nf_conntrack_proto_fini(void);

/* Module-wide init/cleanup, split into start and end phases. */
int nf_conntrack_init_start(void);
void nf_conntrack_cleanup_start(void);
void nf_conntrack_init_end(void);
void nf_conntrack_cleanup_end(void);

/* Fill @inverse with the inverted (reply-direction) form of @orig. */
bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple);

/* Slow path behind nf_conntrack_confirm(); see the inline wrapper below. */
int __nf_conntrack_confirm(struct sk_buff *skb);
/* Confirm a connection: returns NF_DROP if packet must be dropped.
 *
 * If the skb carries an unconfirmed conntrack entry, run the full
 * confirmation path; on success, deliver any cached ecache events.
 * Packets without a conntrack entry pass through untouched.
 */
static inline int nf_conntrack_confirm(struct sk_buff *skb)
{
	struct nf_conn *ct = (struct nf_conn *)skb_nfct(skb);
	int verdict = NF_ACCEPT;

	if (!ct)
		return NF_ACCEPT;

	if (!nf_ct_is_confirmed(ct)) {
		verdict = __nf_conntrack_confirm(skb);

		/* Confirmation may have replaced the entry; re-read it. */
		if (verdict == NF_ACCEPT)
			ct = (struct nf_conn *)skb_nfct(skb);
	}

	if (verdict == NF_ACCEPT && nf_ct_ecache_exist(ct))
		nf_ct_deliver_cached_events(ct);

	return verdict;
}
/* Hook function wrapping the confirm step for hook registration. */
unsigned int nf_confirm(void *priv, struct sk_buff *skb, const struct nf_hook_state *state);

/* Print a tuple to a seq_file using the given L4 protocol helper. */
void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l4proto *proto);

/* Size of the nf_conntrack_locks spinlock array below. */
#define CONNTRACK_LOCKS 1024
extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
void nf_conntrack_lock(spinlock_t *lock);

extern spinlock_t nf_conntrack_expect_lock;
/* ctnetlink code shared by both ctnetlink and nf_conntrack_bpf */

/* Store a new timeout on @ct, clamped to INT_MAX.
 *
 * Confirmed entries are visible to other CPUs, so the store goes
 * through WRITE_ONCE() relative to the current conntrack clock;
 * unconfirmed entries are private and written directly.
 */
static inline void __nf_ct_set_timeout(struct nf_conn *ct, u64 timeout)
{
	u32 clamped = timeout > INT_MAX ? INT_MAX : (u32)timeout;

	if (nf_ct_is_confirmed(ct))
		WRITE_ONCE(ct->timeout, nfct_time_stamp + clamped);
	else
		ct->timeout = clamped;
}
/* Change the timeout of @ct; shared by ctnetlink and nf_conntrack_bpf. */
int __nf_ct_change_timeout(struct nf_conn *ct, u64 cta_timeout);
/* Set (@on) and clear (@off) status bits on @ct. */
void __nf_ct_change_status(struct nf_conn *ct, unsigned long on, unsigned long off);
/* Validate and apply a new status word on @ct. */
int nf_ct_change_status_common(struct nf_conn *ct, unsigned int status);
#endif /* _NF_CONNTRACK_CORE_H */