Contributors: 24
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Saeed Mahameed |
219 |
40.11% |
10 |
26.32% |
Eli Cohen |
179 |
32.78% |
2 |
5.26% |
Tariq Toukan |
33 |
6.04% |
1 |
2.63% |
Artemy Kovalyov |
17 |
3.11% |
1 |
2.63% |
Yuval Avnery |
17 |
3.11% |
3 |
7.89% |
Matan Barak |
17 |
3.11% |
1 |
2.63% |
Eran Ben Elisha |
12 |
2.20% |
2 |
5.26% |
Moni Shoua |
10 |
1.83% |
1 |
2.63% |
Shay Drory |
9 |
1.65% |
1 |
2.63% |
Aviv Heller |
4 |
0.73% |
2 |
5.26% |
Allen Pais |
4 |
0.73% |
1 |
2.63% |
Maor Gottlieb |
3 |
0.55% |
1 |
2.63% |
Daniel Jurgens |
3 |
0.55% |
1 |
2.63% |
Mark Bloch |
3 |
0.55% |
1 |
2.63% |
Majd Dibbiny |
3 |
0.55% |
1 |
2.63% |
Mohamad Haj Yahia |
2 |
0.37% |
1 |
2.63% |
Doron Tsur |
2 |
0.37% |
1 |
2.63% |
Feras Daoud |
2 |
0.37% |
1 |
2.63% |
Eugenia Emantayev |
2 |
0.37% |
1 |
2.63% |
Leon Romanovsky |
1 |
0.18% |
1 |
2.63% |
Yishai Hadas |
1 |
0.18% |
1 |
2.63% |
Greg Kroah-Hartman |
1 |
0.18% |
1 |
2.63% |
Maher Sanalla |
1 |
0.18% |
1 |
2.63% |
Jack Morgenstein |
1 |
0.18% |
1 |
2.63% |
Total |
546 |
|
38 |
|
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018-2021, Mellanox Technologies inc. All rights reserved. */
#ifndef __LIB_MLX5_EQ_H__
#define __LIB_MLX5_EQ_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/mlx5/cq.h>
#define MLX5_EQE_SIZE (sizeof(struct mlx5_eqe))
/* Per-EQ tasklet context used to run CQ completion callbacks from a
 * tasklet (softirq) rather than directly from the hard-IRQ EQ handler.
 */
struct mlx5_eq_tasklet {
	/* CQs queued for processing by the tasklet */
	struct list_head list;
	/* snapshot of @list being drained by the tasklet; presumably split
	 * from @list so new arrivals don't race the drain — TODO confirm
	 * against mlx5_cq_tasklet_cb()
	 */
	struct list_head process_list;
	struct tasklet_struct task;
	spinlock_t lock; /* lock completion tasklet list */
};
/* Per-EQ lookup table mapping CQ numbers to CQ objects; populated by
 * mlx5_eq_add_cq() / mlx5_eq_del_cq().
 */
struct mlx5_cq_table {
	spinlock_t lock; /* protect radix tree */
	struct radix_tree_root tree;
};
/* Core event queue state shared by async and completion EQs. */
struct mlx5_eq {
	/* fragmented-buffer control (stride/size bookkeeping for EQEs) */
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf frag_buf;
	struct mlx5_core_dev *dev;
	/* CQs attached to this EQ, keyed by CQ number */
	struct mlx5_cq_table cq_table;
	/* MMIO doorbell; eq_update_ci() writes arm at +0, CI-only at +2 */
	__be32 __iomem *doorbell;
	/* software consumer index; low 24 bits are written to the doorbell */
	u32 cons_index;
	/* completion vector index — NOTE(review): presumably index into the
	 * device's IRQ vector table; confirm against EQ creation code
	 */
	unsigned int vecidx;
	unsigned int irqn;
	/* hardware EQ number; encoded into the doorbell's top byte */
	u8 eqn;
	/* debugfs resource entry, managed by mlx5_debug_eq_add()/remove() */
	struct mlx5_rsc_debug *dbg;
	struct mlx5_irq *irq;
};
/* Asynchronous (events/commands) EQ: core EQ plus its IRQ notifier. */
struct mlx5_eq_async {
	struct mlx5_eq core;
	struct notifier_block irq_nb;
	spinlock_t lock; /* To avoid irq EQ handle races with resiliency flows */
};
/* Completion EQ: core EQ plus IRQ notifier and the tasklet context used
 * to defer CQ completion handling out of hard-IRQ context.
 */
struct mlx5_eq_comp {
	struct mlx5_eq core;
	struct notifier_block irq_nb;
	struct mlx5_eq_tasklet tasklet_ctx;
	/* linkage on a list of completion EQs — owner list not visible here;
	 * presumably the EQ table's comp-EQ list, TODO confirm
	 */
	struct list_head list;
};
/* Return the number of entries in the EQ (fbc.sz_m1 stores size - 1). */
static inline u32 eq_get_size(struct mlx5_eq *eq)
{
	u32 sz_m1 = eq->fbc.sz_m1;

	return sz_m1 + 1;
}
/* Return a pointer to EQE @entry within the EQ's fragmented buffer. */
static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	struct mlx5_frag_buf_ctrl *fbc = &eq->fbc;

	return mlx5_frag_buf_get_wqe(fbc, entry);
}
static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & eq->fbc.sz_m1);
return (eqe->owner ^ (eq->cons_index >> eq->fbc.log_sz)) & 1 ? NULL : eqe;
}
/* Publish the consumer index to the device doorbell. With @arm set the
 * write lands at doorbell+0 (update CI and re-arm the EQ); otherwise at
 * doorbell+2 (update CI only). The doorbell value packs the EQ number in
 * the top byte over the low 24 bits of the consumer index.
 */
static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	u32 db_val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
	__be32 __iomem *db = arm ? eq->doorbell : eq->doorbell + 2;

	__raw_writel((__force u32)cpu_to_be32(db_val), db);
	/* Raw write skips byte swabbing but we still need ordering, so
	 * follow up with a full barrier.
	 */
	mb();
}
/* EQ table lifecycle: init/cleanup allocate and free software state;
 * create/destroy bring the hardware EQs up and down.
 */
int mlx5_eq_table_init(struct mlx5_core_dev *dev);
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_eq_table_create(struct mlx5_core_dev *dev);
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);

/* Attach/detach a CQ to/from an EQ's CQ table. */
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);

/* Look up the completion EQ with hardware number @eqn. */
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);

/* Tasklet callback that drains deferred CQ completions. */
void mlx5_cq_tasklet_cb(struct tasklet_struct *t);

/* Resiliency helpers: poll a completion EQ with its IRQ disabled and
 * recover a stuck command EQ.
 */
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev);
void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev);
void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);

/* debugfs registration for EQ resources. */
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);

/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);

#ifdef CONFIG_RFS_ACCEL
/* CPU reverse-map for accelerated RFS IRQ affinity. */
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
#endif

/* Return the Linux IRQ number for completion vector @vector in *@irqn. */
int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn);
#endif