Contributors: 18
Author | Tokens | Token Proportion | Commits | Commit Proportion
Vlad Buslov | 165 | 47.69% | 4 | 11.76%
Saeed Mahameed | 42 | 12.14% | 6 | 17.65%
Hadar Hen Zion | 31 | 8.96% | 1 | 2.94%
Amir Vadai | 26 | 7.51% | 2 | 5.88%
Ariel Levkovich | 12 | 3.47% | 1 | 2.94%
Gal Pressman | 11 | 3.18% | 1 | 2.94%
Roi Dayan | 10 | 2.89% | 1 | 2.94%
Paul Blakey | 9 | 2.60% | 4 | 11.76%
Oz Shlomo | 9 | 2.60% | 2 | 5.88%
Jiri Pirko | 7 | 2.02% | 3 | 8.82%
Maor Gottlieb | 6 | 1.73% | 1 | 2.94%
Or Gerlitz | 4 | 1.16% | 2 | 5.88%
Tariq Toukan | 4 | 1.16% | 1 | 2.94%
Bodong Wang | 3 | 0.87% | 1 | 2.94%
Eli Britstein | 2 | 0.58% | 1 | 2.94%
Martin KaFai Lau | 2 | 0.58% | 1 | 2.94%
Eran Ben Elisha | 2 | 0.58% | 1 | 2.94%
Pablo Neira Ayuso | 1 | 0.29% | 1 | 2.94%
Total | 346 | | 34 |
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies. */
#ifndef __MLX5_EN_REP_TC_H__
#define __MLX5_EN_REP_TC_H__
#include <linux/skbuff.h>
#include "en_tc.h"
#include "en_rep.h"
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv);
int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv);
void mlx5e_rep_tc_enable(struct mlx5e_priv *priv);
void mlx5e_rep_tc_disable(struct mlx5e_priv *priv);
int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv);
void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
                            struct mlx5e_encap_entry *e,
                            bool neigh_connected,
                            unsigned char ha[ETH_ALEN]);
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e,
                                 struct mlx5e_neigh *m_neigh,
                                 struct net_device *neigh_dev);
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
                                  struct mlx5e_encap_entry *e);
int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                       void *type_data);
void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
                          struct sk_buff *skb);
#else /* CONFIG_MLX5_CLS_ACT */
struct mlx5e_rep_priv;
static inline int
mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv) { return 0; }
static inline void
mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv) {}
static inline int
mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv) { return 0; }
static inline void
mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) {}
static inline void
mlx5e_rep_tc_enable(struct mlx5e_priv *priv) {}
static inline void
mlx5e_rep_tc_disable(struct mlx5e_priv *priv) {}
static inline int
mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv) { return NOTIFY_DONE; }
static inline int
mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                   void *type_data) { return -EOPNOTSUPP; }
static inline void
mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
                     struct sk_buff *skb) { napi_gro_receive(rq->cq.napi, skb); }
#endif /* CONFIG_MLX5_CLS_ACT */
#endif /* __MLX5_EN_REP_TC_H__ */
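The IS_ENABLED(CONFIG_MLX5_CLS_ACT) split is what lets the rest of the representor code call these functions unconditionally: when TC classification/action support is compiled out, the static inline stubs turn init/cleanup into no-ops, report -EOPNOTSUPP for setup requests, and make mlx5e_rep_tc_receive fall back to plain napi_gro_receive(). The following is a minimal, self-contained userspace sketch of that same compile-time stub pattern, not driver code; DEMO_CLS_ACT and the demo_* names are hypothetical stand-ins for the Kconfig option and the driver API.

/* Sketch of the config-gated stub pattern; build with -DDEMO_CLS_ACT=1
 * to emulate CONFIG_MLX5_CLS_ACT=y. All names here are hypothetical.
 */
#include <stdio.h>

#ifndef DEMO_CLS_ACT
#define DEMO_CLS_ACT 0
#endif

#if DEMO_CLS_ACT
/* "Feature enabled" variants; in the real driver these live in a .c file. */
static inline int demo_rep_tc_init(void)
{
        printf("TC offload initialized\n");
        return 0;
}
static inline int demo_rep_setup_tc(void *type_data)
{
        printf("offloading rule at %p\n", type_data);
        return 0;
}
#else
/* Stub fallbacks, mirroring the #else branch of the header above. */
static inline int demo_rep_tc_init(void) { return 0; }
static inline int demo_rep_setup_tc(void *type_data)
{
        (void)type_data;
        return -95; /* stands in for -EOPNOTSUPP */
}
#endif

int main(void)
{
        int rule = 42;

        /* Caller code is identical whether the feature is built in or not. */
        if (demo_rep_tc_init())
                return 1;
        if (demo_rep_setup_tc(&rule) != 0)
                printf("TC offload not built in; rule stays in software\n");
        return 0;
}

Keeping the stubs as static inline functions rather than macros preserves argument type checking even in builds where the feature is disabled, which is the same reason the header above defines its disabled-path stubs inline.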