Contributors: 12

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Jianbo Liu | 179 | 57.01% | 5 | 20.00% |
| Leon Romanovsky | 51 | 16.24% | 8 | 32.00% |
| Huy Nguyen | 24 | 7.64% | 2 | 8.00% |
| Maor Gottlieb | 20 | 6.37% | 2 | 8.00% |
| Raed Salem | 10 | 3.18% | 1 | 4.00% |
| Paul Blakey | 10 | 3.18% | 1 | 4.00% |
| Ilan Tayari | 8 | 2.55% | 1 | 4.00% |
| Maxim Mikityanskiy | 4 | 1.27% | 1 | 4.00% |
| Amir Vadai | 3 | 0.96% | 1 | 4.00% |
| Mark Bloch | 2 | 0.64% | 1 | 4.00% |
| Aviv Heller | 2 | 0.64% | 1 | 4.00% |
| Yishai Hadas | 1 | 0.32% | 1 | 4.00% |
| Total | 314 | | 25 | |
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef __MLX5_ESW_IPSEC_FS_H__
#define __MLX5_ESW_IPSEC_FS_H__

struct mlx5e_ipsec;
struct mlx5e_ipsec_sa_entry;

#ifdef CONFIG_MLX5_ESWITCH
void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
				      struct mlx5e_ipsec_rx *rx);
int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
				    struct mlx5e_ipsec_rx *rx,
				    struct mlx5_flow_destination *dest);
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx_create_attr *attr);
int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
					   struct mlx5_flow_destination *dest);
int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
					  struct mlx5_flow_act *flow_act);
void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
					  u32 *ipsec_obj_id);
void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
#else
static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
						    struct mlx5e_ipsec_rx *rx) {}

static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
						  struct mlx5e_ipsec_rx *rx,
						  struct mlx5_flow_destination *dest)
{
	return -EINVAL;
}

static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
						     struct mlx5e_ipsec_rx_create_attr *attr) {}

static inline int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
							 struct mlx5_flow_destination *dest)
{
	return -EINVAL;
}

static inline int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
							struct mlx5_flow_act *flow_act)
{
	return -EINVAL;
}

static inline void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) {}

static inline int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
							u32 *ipsec_obj_id)
{
	return -EINVAL;
}

static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
						     struct mlx5e_ipsec_tx_create_attr *attr) {}

static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESW_IPSEC_FS_H__ */
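The `#else` branch above follows a common kernel pattern: when CONFIG_MLX5_ESWITCH is compiled out, the header supplies static inline stubs so callers build unchanged, with void helpers becoming no-ops and value-returning helpers failing with -EINVAL. Below is a minimal user-space sketch of that pattern; the names (CONFIG_FOO, foo_setup) are made up for illustration and are not part of the mlx5 API.

/* Hypothetical sketch of the compiled-out-stub pattern, not mlx5 code. */
#include <errno.h>
#include <stdio.h>

#ifdef CONFIG_FOO
int foo_setup(int id); /* real implementation would live elsewhere */
#else
/* Stub used when the feature is configured out: callers still compile,
 * and any attempt to use the feature fails with a clear error code. */
static inline int foo_setup(int id)
{
	(void)id;
	return -EINVAL;
}
#endif

int main(void)
{
	int err = foo_setup(1);

	if (err)
		fprintf(stderr, "foo_setup: error %d\n", err);
	return 0;
}

Returning -EINVAL from the stubs, rather than 0, means a caller that reaches this code on a non-eswitch build fails loudly instead of appearing to succeed.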