Contributors: 17
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Eric Dumazet | 125 | 30.34% | 4 | 10.53% |
| David S. Miller | 87 | 21.12% | 8 | 21.05% |
| Linus Torvalds (pre-git) | 83 | 20.15% | 9 | 23.68% |
| Alexey Dobriyan | 49 | 11.89% | 1 | 2.63% |
| Hideaki Yoshifuji / 吉藤英明 | 17 | 4.13% | 2 | 5.26% |
| Eric W. Biederman | 13 | 3.16% | 2 | 5.26% |
| Herbert Xu | 11 | 2.67% | 2 | 5.26% |
| Alexey Kuznetsov | 6 | 1.46% | 1 | 2.63% |
| Julian Anastasov | 4 | 0.97% | 1 | 2.63% |
| Daniel Lezcano | 4 | 0.97% | 1 | 2.63% |
| Hangbin Liu | 3 | 0.73% | 1 | 2.63% |
| Paul Mundt | 3 | 0.73% | 1 | 2.63% |
| Christoph Lameter | 2 | 0.49% | 1 | 2.63% |
| Tejun Heo | 2 | 0.49% | 1 | 2.63% |
| Steffen Klassert | 1 | 0.24% | 1 | 2.63% |
| Greg Kroah-Hartman | 1 | 0.24% | 1 | 2.63% |
| Jon Maxwell | 1 | 0.24% | 1 | 2.63% |
| Total | 412 | | 38 | |
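The file itself, include/net/dst_ops.h, defines struct dst_ops, the table of per-protocol-family operations attached to destination cache entries, together with inline helpers that track the number of live entries in a per-CPU counter: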
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_DST_OPS_H
#define _NET_DST_OPS_H
#include <linux/types.h>
#include <linux/percpu_counter.h>
#include <linux/cache.h>

struct dst_entry;
struct kmem_cache;
struct net_device;
struct sk_buff;
struct sock;
struct net;
struct dst_ops {
	unsigned short		family;
	unsigned int		gc_thresh;

	/* Per-family callbacks operating on dst_entry objects. */
	void			(*gc)(struct dst_ops *ops);
	struct dst_entry *	(*check)(struct dst_entry *, __u32 cookie);
	unsigned int		(*default_advmss)(const struct dst_entry *);
	unsigned int		(*mtu)(const struct dst_entry *);
	u32 *			(*cow_metrics)(struct dst_entry *, unsigned long);
	void			(*destroy)(struct dst_entry *);
	void			(*ifdown)(struct dst_entry *,
					  struct net_device *dev);
	struct dst_entry *	(*negative_advice)(struct dst_entry *);
	void			(*link_failure)(struct sk_buff *);
	void			(*update_pmtu)(struct dst_entry *dst, struct sock *sk,
					       struct sk_buff *skb, u32 mtu,
					       bool confirm_neigh);
	void			(*redirect)(struct dst_entry *dst, struct sock *sk,
					    struct sk_buff *skb);
	int			(*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
	struct neighbour *	(*neigh_lookup)(const struct dst_entry *dst,
						struct sk_buff *skb,
						const void *daddr);
	void			(*confirm_neigh)(const struct dst_entry *dst,
						 const void *daddr);

	struct kmem_cache	*kmem_cachep;

	/* Live-entry count; own cacheline to avoid false sharing. */
	struct percpu_counter	pcpuc_entries ____cacheline_aligned_in_smp;
};
static inline int dst_entries_get_fast(struct dst_ops *dst)
{
	/* Approximate read: the cached global count may lag the
	 * per-CPU deltas by up to the batch size per CPU. */
	return percpu_counter_read_positive(&dst->pcpuc_entries);
}

static inline int dst_entries_get_slow(struct dst_ops *dst)
{
	/* Exact read: sums every CPU's local delta; costlier. */
	return percpu_counter_sum_positive(&dst->pcpuc_entries);
}

#define DST_PERCPU_COUNTER_BATCH 32
static inline void dst_entries_add(struct dst_ops *dst, int val)
{
	percpu_counter_add_batch(&dst->pcpuc_entries, val,
				 DST_PERCPU_COUNTER_BATCH);
}

static inline int dst_entries_init(struct dst_ops *dst)
{
	return percpu_counter_init(&dst->pcpuc_entries, 0, GFP_KERNEL);
}

static inline void dst_entries_destroy(struct dst_ops *dst)
{
	percpu_counter_destroy(&dst->pcpuc_entries);
}
#endif
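To see how the pieces fit together, here is a minimal, hypothetical sketch of a protocol providing its own dst_ops and using the entry-accounting helpers. Every "example_*" name is an invention for illustration, not a real kernel symbol; real users such as net/ipv4/route.c register far more callbacks.

/*
 * Hypothetical sketch of a dst_ops user; "example_*" names are
 * illustrative only.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/socket.h>	/* AF_INET */
#include <net/dst_ops.h>

static struct dst_entry *example_dst_check(struct dst_entry *dst, __u32 cookie)
{
	/* A real implementation would validate the cookie and return
	 * NULL for a stale route; this stub always reports valid. */
	return dst;
}

static unsigned int example_mtu(const struct dst_entry *dst)
{
	return 1500;	/* illustrative fixed MTU */
}

static struct dst_ops example_dst_ops = {
	.family	= AF_INET,
	.check	= example_dst_check,
	.mtu	= example_mtu,
};

static int __init example_dst_demo(void)
{
	int err = dst_entries_init(&example_dst_ops);

	if (err)	/* percpu allocation can fail */
		return err;

	dst_entries_add(&example_dst_ops, 1);	/* one entry created */
	pr_info("approx live entries: %d\n",
		dst_entries_get_fast(&example_dst_ops));

	dst_entries_add(&example_dst_ops, -1);	/* entry destroyed */
	dst_entries_destroy(&example_dst_ops);
	return 0;
}
module_init(example_dst_demo);
MODULE_LICENSE("GPL");

Note the trade-off the two getters expose: with a batch size of DST_PERCPU_COUNTER_BATCH, dst_entries_get_fast() can lag the true count by up to 32 per CPU, so callers needing an exact figure pay the cross-CPU summation cost of dst_entries_get_slow() instead.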