Contributors: 33
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Jakub Kiciński | 315 | 25.30% | 7 | 9.21% |
| Joe Damato | 261 | 20.96% | 3 | 3.95% |
| Eric Dumazet | 131 | 10.52% | 6 | 7.89% |
| Linus Torvalds (pre-git) | 124 | 9.96% | 12 | 15.79% |
| Sebastian Andrzej Siewior | 50 | 4.02% | 3 | 3.95% |
| Xin Long | 48 | 3.86% | 1 | 1.32% |
| Paolo Abeni | 46 | 3.69% | 3 | 3.95% |
| Petr Machata | 39 | 3.13% | 2 | 2.63% |
| Jiri Pirko | 33 | 2.65% | 4 | 5.26% |
| Florian Fainelli | 27 | 2.17% | 1 | 1.32% |
| Hangbin Liu | 19 | 1.53% | 2 | 2.63% |
| Américo Wang | 17 | 1.37% | 5 | 6.58% |
| Martin Karsten | 16 | 1.29% | 1 | 1.32% |
| Stephen Hemminger | 15 | 1.20% | 3 | 3.95% |
| Joe Perches | 14 | 1.12% | 2 | 2.63% |
| Arnd Bergmann | 12 | 0.96% | 1 | 1.32% |
| Ansuel Smith | 12 | 0.96% | 1 | 1.32% |
| Peter P. Waskiewicz Jr | 10 | 0.80% | 1 | 1.32% |
| Thomas Graf | 10 | 0.80% | 1 | 1.32% |
| Björn Töpel | 7 | 0.56% | 3 | 3.95% |
| Kory Maincent | 6 | 0.48% | 1 | 1.32% |
| Pavel Emelyanov | 6 | 0.48% | 2 | 2.63% |
| Stefan Rompf | 5 | 0.40% | 1 | 1.32% |
| Nicolas Dichtel | 4 | 0.32% | 1 | 1.32% |
| Miroslav Lichvar | 4 | 0.32% | 1 | 1.32% |
| Ahmed S. Darwish | 3 | 0.24% | 1 | 1.32% |
| Magnus Karlsson | 2 | 0.16% | 1 | 1.32% |
| Harvey Harrison | 2 | 0.16% | 1 | 1.32% |
| Edward Cree | 2 | 0.16% | 1 | 1.32% |
| Patrick McHardy | 2 | 0.16% | 1 | 1.32% |
| Thomas Gleixner | 1 | 0.08% | 1 | 1.32% |
| Matthias Tafelmeier | 1 | 0.08% | 1 | 1.32% |
| Tommy S. Christensen | 1 | 0.08% | 1 | 1.32% |
| Total | 1245 | | 76 | |
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_CORE_DEV_H
#define _NET_CORE_DEV_H
#include <linux/types.h>
#include <linux/rwsem.h>
#include <linux/netdevice.h>
struct net;
struct netlink_ext_ack;
struct cpumask;
/* Random bits of netdevice that don't need to be exposed */
#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
struct sd_flow_limit {
u64 count;
unsigned int num_buckets;
unsigned int history_head;
u16 history[FLOW_LIMIT_HISTORY];
u8 buckets[];
};
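/*
 * Usage sketch (hypothetical; modeled on how net/core/dev.c consumes this
 * struct, not part of this header): history[] is a ring buffer of recently
 * seen bucket indices. Because FLOW_LIMIT_HISTORY is a power of two,
 * advancing history_head is a cheap mask, and the bucket of the sample
 * being overwritten is decremented as the new flow is recorded.
 */
static inline void example_flow_limit_record(struct sd_flow_limit *fl,
					     unsigned int new_flow)
{
	unsigned int old_flow;

	/* Replace the oldest sample in the ring with the new flow. */
	old_flow = fl->history[fl->history_head];
	fl->history[fl->history_head] = new_flow;

	fl->history_head++;
	fl->history_head &= FLOW_LIMIT_HISTORY - 1;

	/* Age out the displaced sample, then count the new one. */
	if (likely(fl->buckets[old_flow]))
		fl->buckets[old_flow]--;
	fl->buckets[new_flow]++;
}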
extern int netdev_flow_limit_table_len;
struct napi_struct *netdev_napi_by_id(struct net *net, unsigned int napi_id);
#ifdef CONFIG_PROC_FS
int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif
void linkwatch_init_dev(struct net_device *dev);
void linkwatch_run_queue(void);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
void dev_addr_check(struct net_device *dev);
#if IS_ENABLED(CONFIG_NET_SHAPER)
void net_shaper_flush_netdev(struct net_device *dev);
void net_shaper_set_real_num_tx_queues(struct net_device *dev,
unsigned int txq);
#else
static inline void net_shaper_flush_netdev(struct net_device *dev) {}
static inline void net_shaper_set_real_num_tx_queues(struct net_device *dev,
unsigned int txq) {}
#endif
/* sysctls not referred to from outside net/core/ */
extern int netdev_unregister_timeout_secs;
extern int weight_p;
extern int dev_weight_rx_bias;
extern int dev_weight_tx_bias;
extern struct rw_semaphore dev_addr_sem;
/* rtnl helpers */
extern struct list_head net_todo_list;
void netdev_run_todo(void);
/* netdev management, shared between various uAPI entry points */
struct netdev_name_node {
struct hlist_node hlist;
struct list_head list;
struct net_device *dev;
const char *name;
struct rcu_head rcu;
};
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_change_name(struct net_device *dev, const char *newname);
#define netdev_for_each_altname(dev, namenode) \
list_for_each_entry((namenode), &(dev)->name_node->list, list)
#define netdev_for_each_altname_safe(dev, namenode, next) \
list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
list)
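/*
 * Example (hypothetical helper, not part of this header): scanning a
 * device's alternative names with the iterator above; callers are expected
 * to hold RTNL so the list is stable. Assumes strcmp() via linux/string.h.
 */
static inline bool example_has_altname(struct net_device *dev,
				       const char *name)
{
	struct netdev_name_node *name_node;

	netdev_for_each_altname(dev, name_node)
		if (!strcmp(name_node->name, name))
			return true;
	return false;
}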
int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
int dev_validate_mtu(struct net_device *dev, int mtu,
struct netlink_ext_ack *extack);
int dev_set_mtu_ext(struct net_device *dev, int mtu,
struct netlink_ext_ack *extack);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
u32 value);
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, int expected_fd, u32 flags);
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len);
void dev_set_group(struct net_device *dev, int new_group);
int dev_change_carrier(struct net_device *dev, bool new_carrier);
void __dev_set_rx_mode(struct net_device *dev);
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
unsigned int gchanges, u32 portid,
const struct nlmsghdr *nlh);
void unregister_netdevice_many_notify(struct list_head *head,
u32 portid, const struct nlmsghdr *nlh);
static inline void netif_set_gso_max_size(struct net_device *dev,
unsigned int size)
{
/* dev->gso_max_size is read locklessly from sk_setup_caps() */
WRITE_ONCE(dev->gso_max_size, size);
if (size <= GSO_LEGACY_MAX_SIZE)
WRITE_ONCE(dev->gso_ipv4_max_size, size);
}
static inline void netif_set_gso_max_segs(struct net_device *dev,
unsigned int segs)
{
/* dev->gso_max_segs is read locklessly from sk_setup_caps() */
WRITE_ONCE(dev->gso_max_segs, segs);
}
static inline void netif_set_gro_max_size(struct net_device *dev,
unsigned int size)
{
/* This pairs with the READ_ONCE() in skb_gro_receive() */
WRITE_ONCE(dev->gro_max_size, size);
if (size <= GRO_LEGACY_MAX_SIZE)
WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
static inline void netif_set_gso_ipv4_max_size(struct net_device *dev,
unsigned int size)
{
/* dev->gso_ipv4_max_size is read locklessly from sk_setup_caps() */
WRITE_ONCE(dev->gso_ipv4_max_size, size);
}
static inline void netif_set_gro_ipv4_max_size(struct net_device *dev,
unsigned int size)
{
/* This pairs with the READ_ONCE() in skb_gro_receive() */
WRITE_ONCE(dev->gro_ipv4_max_size, size);
}
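/*
 * Example (hypothetical, illustrative values): a driver whose hardware
 * cannot segment or coalesce beyond 16 KiB could clamp the ceilings at
 * probe time with the helpers above. Since 16384 is below
 * GSO_LEGACY_MAX_SIZE, the setters also pull the IPv4-specific limits
 * down to the same value.
 */
static inline void example_clamp_offload_limits(struct net_device *dev)
{
	netif_set_gso_max_size(dev, 16384);
	netif_set_gso_max_segs(dev, 32);
	netif_set_gro_max_size(dev, 16384);
}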
/**
* napi_get_defer_hard_irqs - get the NAPI's defer_hard_irqs
* @n: napi struct to get the defer_hard_irqs field from
*
 * Return: the per-NAPI value of the defer_hard_irqs field.
*/
static inline u32 napi_get_defer_hard_irqs(const struct napi_struct *n)
{
return READ_ONCE(n->defer_hard_irqs);
}
/**
* napi_set_defer_hard_irqs - set the defer_hard_irqs for a napi
* @n: napi_struct to set the defer_hard_irqs field
* @defer: the value the field should be set to
*/
static inline void napi_set_defer_hard_irqs(struct napi_struct *n, u32 defer)
{
WRITE_ONCE(n->defer_hard_irqs, defer);
}
/**
* netdev_set_defer_hard_irqs - set defer_hard_irqs for all NAPIs of a netdev
* @netdev: the net_device for which all NAPIs will have defer_hard_irqs set
* @defer: the defer_hard_irqs value to set
*/
static inline void netdev_set_defer_hard_irqs(struct net_device *netdev,
u32 defer)
{
unsigned int count = max(netdev->num_rx_queues,
netdev->num_tx_queues);
struct napi_struct *napi;
int i;
WRITE_ONCE(netdev->napi_defer_hard_irqs, defer);
list_for_each_entry(napi, &netdev->napi_list, dev_list)
napi_set_defer_hard_irqs(napi, defer);
for (i = 0; i < count; i++)
netdev->napi_config[i].defer_hard_irqs = defer;
}
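/*
 * Example (hypothetical): the netdev-wide setter above fans the value out
 * to every registered NAPI and to the persistent napi_config slots, so a
 * subsequent per-NAPI read observes the same setting.
 */
static inline void example_defer_two_irqs(struct net_device *netdev)
{
	struct napi_struct *napi;

	netdev_set_defer_hard_irqs(netdev, 2);
	list_for_each_entry(napi, &netdev->napi_list, dev_list)
		WARN_ON(napi_get_defer_hard_irqs(napi) != 2);
}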
/**
* napi_get_gro_flush_timeout - get the gro_flush_timeout
* @n: napi struct to get the gro_flush_timeout from
*
* Return: the per-NAPI value of the gro_flush_timeout field.
*/
static inline unsigned long
napi_get_gro_flush_timeout(const struct napi_struct *n)
{
return READ_ONCE(n->gro_flush_timeout);
}
/**
* napi_set_gro_flush_timeout - set the gro_flush_timeout for a napi
* @n: napi struct to set the gro_flush_timeout
* @timeout: timeout value to set
*
* napi_set_gro_flush_timeout sets the per-NAPI gro_flush_timeout
*/
static inline void napi_set_gro_flush_timeout(struct napi_struct *n,
unsigned long timeout)
{
WRITE_ONCE(n->gro_flush_timeout, timeout);
}
/**
* netdev_set_gro_flush_timeout - set gro_flush_timeout of a netdev's NAPIs
* @netdev: the net_device for which all NAPIs will have gro_flush_timeout set
* @timeout: the timeout value to set
*/
static inline void netdev_set_gro_flush_timeout(struct net_device *netdev,
unsigned long timeout)
{
unsigned int count = max(netdev->num_rx_queues,
netdev->num_tx_queues);
struct napi_struct *napi;
int i;
WRITE_ONCE(netdev->gro_flush_timeout, timeout);
list_for_each_entry(napi, &netdev->napi_list, dev_list)
napi_set_gro_flush_timeout(napi, timeout);
for (i = 0; i < count; i++)
netdev->napi_config[i].gro_flush_timeout = timeout;
}
/**
* napi_get_irq_suspend_timeout - get the irq_suspend_timeout
* @n: napi struct to get the irq_suspend_timeout from
*
* Return: the per-NAPI value of the irq_suspend_timeout field.
*/
static inline unsigned long
napi_get_irq_suspend_timeout(const struct napi_struct *n)
{
return READ_ONCE(n->irq_suspend_timeout);
}
/**
* napi_set_irq_suspend_timeout - set the irq_suspend_timeout for a napi
* @n: napi struct to set the irq_suspend_timeout
* @timeout: timeout value to set
*
* napi_set_irq_suspend_timeout sets the per-NAPI irq_suspend_timeout
*/
static inline void napi_set_irq_suspend_timeout(struct napi_struct *n,
unsigned long timeout)
{
WRITE_ONCE(n->irq_suspend_timeout, timeout);
}
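/*
 * Example (hypothetical values): an epoll-driven busy-poll setup might
 * pair the two timeouts, with gro_flush_timeout bounding ordinary IRQ
 * deferral and irq_suspend_timeout bounding the longer suspension used
 * while the application processes a batch of events. Assumes
 * NSEC_PER_USEC/NSEC_PER_MSEC from linux/time64.h.
 */
static inline void example_configure_busy_poll(struct napi_struct *napi)
{
	napi_set_gro_flush_timeout(napi, 200 * NSEC_PER_USEC);
	napi_set_irq_suspend_timeout(napi, 20 * NSEC_PER_MSEC);
}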
int rps_cpumask_housekeeping(struct cpumask *mask);
#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL)
void xdp_do_check_flushed(struct napi_struct *napi);
#else
static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
#endif
void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
#define XMIT_RECURSION_LIMIT 8
#ifndef CONFIG_PREEMPT_RT
static inline bool dev_xmit_recursion(void)
{
return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
XMIT_RECURSION_LIMIT);
}
static inline void dev_xmit_recursion_inc(void)
{
__this_cpu_inc(softnet_data.xmit.recursion);
}
static inline void dev_xmit_recursion_dec(void)
{
__this_cpu_dec(softnet_data.xmit.recursion);
}
#else
static inline bool dev_xmit_recursion(void)
{
return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
}
static inline void dev_xmit_recursion_inc(void)
{
current->net_xmit.recursion++;
}
static inline void dev_xmit_recursion_dec(void)
{
current->net_xmit.recursion--;
}
#endif
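/*
 * Example (hypothetical): a tunnel-style driver that re-enters the
 * transmit path brackets the nested xmit with the counters above and
 * drops the packet once XMIT_RECURSION_LIMIT is exceeded. Assumes
 * dev_queue_xmit()/kfree_skb() via linux/netdevice.h and -ENETDOWN via
 * linux/errno.h.
 */
static inline int example_nested_xmit(struct sk_buff *skb)
{
	int ret;

	if (unlikely(dev_xmit_recursion())) {
		kfree_skb(skb);
		return -ENETDOWN;
	}

	dev_xmit_recursion_inc();
	ret = dev_queue_xmit(skb);
	dev_xmit_recursion_dec();
	return ret;
}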
int dev_set_hwtstamp_phylib(struct net_device *dev,
struct kernel_hwtstamp_config *cfg,
struct netlink_ext_ack *extack);
#endif