Contributors: 14

Author          | Tokens | Token Proportion | Commits | Commit Proportion
Lior Nahmanson  | 226    | 62.26%           | 4       | 15.38%
Raed Salem      | 38     | 10.47%           | 2       | 7.69%
Ilan Tayari     | 30     | 8.26%            | 1       | 3.85%
Saeed Mahameed  | 15     | 4.13%            | 6       | 23.08%
Amir Vadai      | 14     | 3.86%            | 2       | 7.69%
Maor Gottlieb   | 8      | 2.20%            | 2       | 7.69%
Eli Cohen       | 8      | 2.20%            | 1       | 3.85%
Hadar Hen Zion  | 7      | 1.93%            | 1       | 3.85%
Mark Bloch      | 4      | 1.10%            | 1       | 3.85%
Huy Nguyen      | 4      | 1.10%            | 2       | 7.69%
Or Gerlitz      | 3      | 0.83%            | 1       | 3.85%
Fan Li          | 2      | 0.55%            | 1       | 3.85%
Aviv Heller     | 2      | 0.55%            | 1       | 3.85%
Boris Pismenny  | 2      | 0.55%            | 1       | 3.85%
Total           | 363    |                  | 26      |
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#ifndef __MLX5_EN_ACCEL_MACSEC_H__
#define __MLX5_EN_ACCEL_MACSEC_H__

#ifdef CONFIG_MLX5_EN_MACSEC

#include <linux/mlx5/driver.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
/* Bits 31-30: MACsec marker, bits 15-0: MACsec id */
#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */
#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX
#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
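The macros above carve the 32-bit flow-table metadata word into a 2-bit MACsec marker (bits 31-30, expected to read 0x1) and a 16-bit MACsec flow-steering id (bits 15-0). A minimal userspace sketch of that encode/decode, re-declaring the masks locally so it compiles outside the kernel tree:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the header's macros, for illustration only. */
#define FS_ID_MASK 0xffffu                          /* bits 15-0 */
#define IS_MACSEC(md) ((((md) >> 30) & 0x3) == 0x1) /* bits 31-30 */
#define FS_ID(md) ((md) & FS_ID_MASK)

int main(void)
{
	/* Encode: marker 0x1 in bits 31-30, fs_id 42 in bits 15-0. */
	uint32_t metadata = (0x1u << 30) | 42u;

	if (IS_MACSEC(metadata))
		printf("MACsec flow, fs_id = %u\n", FS_ID(metadata));
	return 0;
}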
struct mlx5e_priv;
struct mlx5e_macsec;

struct mlx5e_macsec_stats {
	u64 macsec_rx_pkts;
	u64 macsec_rx_bytes;
	u64 macsec_rx_pkts_drop;
	u64 macsec_rx_bytes_drop;
	u64 macsec_tx_pkts;
	u64 macsec_tx_bytes;
	u64 macsec_tx_pkts_drop;
	u64 macsec_tx_bytes_drop;
};
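mlx5e_macsec_stats keeps one u64 per direction/outcome pair; the accessors declared further down (mlx5e_macsec_get_stats_fill() and mlx5e_macsec_get_stats()) expose it to the driver's statistics code. As a hedged illustration of how a consumer might flatten the struct into name/value pairs for an ethtool-style dump (the name table and the dump loop are assumptions, not the driver's actual stats glue):

/* Illustrative only: names listed in field order. The struct is all
 * u64s, so it can be walked as a flat array for a simple dump.
 */
static const char * const macsec_counter_names[] = {
	"macsec_rx_pkts",      "macsec_rx_bytes",
	"macsec_rx_pkts_drop", "macsec_rx_bytes_drop",
	"macsec_tx_pkts",      "macsec_tx_bytes",
	"macsec_tx_pkts_drop", "macsec_tx_bytes_drop",
};

static void macsec_stats_dump(const struct mlx5e_macsec_stats *stats)
{
	const u64 *vals = (const u64 *)stats;
	int i;

	for (i = 0; i < ARRAY_SIZE(macsec_counter_names); i++)
		pr_info("%s: %llu\n", macsec_counter_names[i], vals[i]);
}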
void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv);
int mlx5e_macsec_init(struct mlx5e_priv *priv);
void mlx5e_macsec_cleanup(struct mlx5e_priv *priv);
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb);
void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
				struct sk_buff *skb,
				struct mlx5_wqe_eth_seg *eseg);
static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	return md_dst && (md_dst->type == METADATA_MACSEC);
}
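Together with mlx5e_macsec_handle_tx_skb() and mlx5e_macsec_tx_build_eseg() above, this helper suggests the TX fast-path shape: skip non-offloaded skbs cheaply, validate the offloaded skb, then stamp the WQE's Ethernet segment. A hedged sketch of such a hook; the wrapper function and its return convention are assumptions, not the driver's actual xmit code:

/* Illustrative TX hook: returns false if the skb should be dropped. */
static bool macsec_tx_hook(struct mlx5e_macsec *macsec, struct sk_buff *skb,
			   struct mlx5_wqe_eth_seg *eseg)
{
	if (!mlx5e_macsec_skb_is_offload(skb))
		return true; /* not MACsec; transmit unchanged */

	if (!mlx5e_macsec_handle_tx_skb(macsec, skb))
		return false; /* offload setup failed */

	mlx5e_macsec_tx_build_eseg(macsec, skb, eseg);
	return true;
}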
static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe)
{
	return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
					struct mlx5_cqe64 *cqe);
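On receive, the marker test is a cheap per-CQE check, so only flows the hardware actually tagged as MACsec pay for the metadata handling. A hedged sketch of the RX side, with the wrapper function itself being an assumption:

/* Illustrative RX hook: restore MACsec context before the skb goes up. */
static void macsec_rx_hook(struct net_device *netdev, struct sk_buff *skb,
			   struct mlx5_cqe64 *cqe)
{
	if (!mlx5e_macsec_is_rx_flow(cqe))
		return; /* ordinary traffic */

	mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
}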
bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev);
void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats);
struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec);
#else
static inline void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv) {}
static inline int mlx5e_macsec_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_macsec_cleanup(struct mlx5e_priv *priv) {}
static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb) { return false; }
static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
						      struct sk_buff *skb,
						      struct mlx5_cqe64 *cqe)
{}
static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; }
#endif /* CONFIG_MLX5_EN_MACSEC */
#endif /* __MLX5_EN_ACCEL_MACSEC_H__ */