/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_LAG_H__
#define __MLX5_LAG_H__

#include <linux/debugfs.h>

#define MLX5_LAG_MAX_HASH_BUCKETS 16
#include "mlx5_core.h"
#include "mp.h"
#include "port_sel.h"
#include "mpesw.h"

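/* Logical indices of the first two LAG ports. */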
enum {
	MLX5_LAG_P1,
	MLX5_LAG_P2,
};

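/* ldev->state_flags bits. NDEVS_READY is set once a netdev has been
 * registered for every LAG port.
 */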
enum {
	MLX5_LAG_FLAG_NDEVS_READY,
};

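/* ldev->mode_flags bits refining how the active mode is offloaded:
 * hash-based port selection (rather than queue affinity), a single FDB
 * shared by both E-Switches, and native FDB selection mode.
 */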
enum {
	MLX5_LAG_MODE_FLAG_HASH_BASED,
	MLX5_LAG_MODE_FLAG_SHARED_FDB,
	MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
};

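/* High-level LAG operating modes: RoCE bonding, SRIOV (VF) LAG,
 * ECMP multipath and multi-port E-Switch (MPESW).
 */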
enum mlx5_lag_mode {
	MLX5_LAG_MODE_NONE,
	MLX5_LAG_MODE_ROCE,
	MLX5_LAG_MODE_SRIOV,
	MLX5_LAG_MODE_MULTIPATH,
	MLX5_LAG_MODE_MPESW,
};

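/* Per physical function state: the core device, the netdev bonded on
 * top of it, and whether a drop rule is currently installed for it.
 */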
struct lag_func {
	struct mlx5_core_dev *dev;
	struct net_device    *netdev;
	bool has_drop;
};

/* Tracks netdev event info for the bond's lower devices. */
struct lag_tracker {
	enum   netdev_lag_tx_type           tx_type;
	struct netdev_lag_lower_state_info  netdev_state[MLX5_MAX_PORTS];
	unsigned int is_bonded:1;
	unsigned int has_inactive:1;
	enum netdev_lag_hash hash_type;
};

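/* Work item used to switch multi-port E-Switch (MPESW) mode on or off;
 * the requester waits on 'comp' and reads the outcome from 'result'.
 */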
enum mpesw_op {
	MLX5_MPESW_OP_ENABLE,
	MLX5_MPESW_OP_DISABLE,
};

struct mlx5_mpesw_work_st {
	struct work_struct work;
	struct mlx5_lag    *lag;
	enum mpesw_op	   op;
	struct completion  comp;
	int result;
};

/* LAG data of a ConnectX card.
 * It serves both of its physical functions.
 */
struct mlx5_lag {
	enum mlx5_lag_mode        mode;
	unsigned long		  mode_flags;
	unsigned long		  state_flags;
	u8			  ports;
	u8			  buckets;
	int			  mode_changes_in_progress;
	u8			  v2p_map[MLX5_MAX_PORTS * MLX5_LAG_MAX_HASH_BUCKETS];
	struct kref               ref;
	struct lag_func           pf[MLX5_MAX_PORTS];
	struct lag_tracker        tracker;
	struct workqueue_struct   *wq;
	struct delayed_work       bond_work;
	struct notifier_block     nb;
	struct lag_mp             lag_mp;
	struct mlx5_lag_port_sel  port_sel;
	/* Protect lag fields/state changes */
	struct mutex		  lock;
	struct lag_mpesw	  lag_mpesw;
};

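/* LAG needs the vport_group_manager and lag_master HCA capabilities and
 * between 2 and MLX5_MAX_PORTS lag ports.
 */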
static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev)
{
	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
	    MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
		return false;
	return true;
}

static inline struct mlx5_lag *
mlx5_lag_dev(struct mlx5_core_dev *dev)
{
	return dev->priv.lag;
}

static inline bool
__mlx5_lag_is_active(struct mlx5_lag *ldev)
{
	return ldev->mode != MLX5_LAG_MODE_NONE;
}

static inline bool
mlx5_lag_is_ready(struct mlx5_lag *ldev)
{
	return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
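
/* Illustrative caller pattern for the helpers above (a sketch, not part
 * of this header):
 *
 *	struct mlx5_lag *ldev = mlx5_lag_dev(dev);
 *
 *	if (!ldev || !mlx5_lag_is_ready(ldev))
 *		return;
 *	mutex_lock(&ldev->lock);
 *	if (__mlx5_lag_is_active(ldev))
 *		do_something_with_the_bond(ldev);
 *	mutex_unlock(&ldev->lock);
 *
 * do_something_with_the_bond() is a hypothetical placeholder.
 */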

void mlx5_modify_lag(struct mlx5_lag *ldev,
		     struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
		      struct lag_tracker *tracker,
		      enum mlx5_lag_mode mode,
		      bool shared_fdb);
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				struct net_device *ndev);
bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev);
void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev);
int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev);

char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
			   u8 *ports, int *num_enabled);

void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
void mlx5_ldev_remove_debugfs(struct dentry *dbg);
void mlx5_disable_lag(struct mlx5_lag *ldev);

#endif /* __MLX5_LAG_H__ */