Release 4.18: drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/irq.h>
#include "en.h"

static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
        int current_cpu = smp_processor_id();
        const struct cpumask *aff;
        struct irq_data *idata;

        idata = irq_desc_get_irq_data(c->irq_desc);
        aff = irq_data_get_affinity_mask(idata);
        return cpumask_test_cpu(current_cpu, aff);
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Tariq Toukan | 53 | 100.00% | 1 | 100.00% |
Total | 53 | 100.00% | 1 | 100.00% |
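The helper above returns true as long as the CPU running the NAPI poll is still in the channel IRQ's affinity mask; once the mask has changed, the poller completes so the next interrupt reschedules it on an allowed CPU. It relies on c->irq_desc having been cached when the channel was set up. A minimal sketch of how that pointer might be captured (the function name is illustrative, not verbatim driver code; irq_to_desc() comes from <linux/irqdesc.h>):

/* Illustrative sketch only: cache the IRQ descriptor so the affinity test
 * in mlx5e_channel_no_affinity_change() can consult the IRQ's CPU mask at
 * poll time.
 */
#include <linux/irqdesc.h>

static void example_channel_save_irq_desc(struct mlx5e_channel *c,
                                           unsigned int irq)
{
        c->irq_desc = irq_to_desc(irq);
}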
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
        struct mlx5e_sq_stats *stats = sq->stats;
        struct net_dim_sample dim_sample;

        if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
                return;

        net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes,
                       &dim_sample);
        net_dim(&sq->dim, dim_sample);
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Tal Gilboa | 56 | 78.87% | 1 | 33.33% |
Eran Ben Elisha | 11 | 15.49% | 1 | 33.33% |
Gal Pressman | 4 | 5.63% | 1 | 33.33% |
Total | 71 | 100.00% | 3 | 100.00% |
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
        struct mlx5e_rq_stats *stats = rq->stats;
        struct net_dim_sample dim_sample;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
                return;

        net_dim_sample(rq->cq.event_ctr, stats->packets, stats->bytes,
                       &dim_sample);
        net_dim(&rq->dim, dim_sample);
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Tal Gilboa | 56 | 78.87% | 1 | 33.33% |
Eran Ben Elisha | 11 | 15.49% | 1 | 33.33% |
Gal Pressman | 4 | 5.63% | 1 | 33.33% |
Total | 71 | 100.00% | 3 | 100.00% |
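Both DIM handlers follow the same pattern: once per NAPI completion they take an {event counter, packets, bytes} sample and hand it to net_dim(), which compares it against the previous sample and, if a different interrupt-moderation profile looks better, schedules dim->work so the driver can reprogram the CQ outside the poll loop. A hedged sketch of what such a worker might look like, assuming the net_dim state machine in <linux/net_dim.h>; example_apply_cq_moderation() is a placeholder for the driver-specific CQ-modify call:

#include <linux/net_dim.h>
#include <linux/workqueue.h>

/* Placeholder: translate dim->profile_ix into usec/frame limits and program
 * them into the hardware CQ (driver specific, omitted in this sketch).
 */
static void example_apply_cq_moderation(struct net_dim *dim)
{
}

static void example_rx_dim_work(struct work_struct *work)
{
        struct net_dim *dim = container_of(work, struct net_dim, work);

        example_apply_cq_moderation(dim);

        /* Let net_dim() start measuring against the new profile. */
        dim->state = NET_DIM_START_MEASURE;
}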
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
        struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
                                               napi);
        bool busy = false;
        int work_done = 0;
        int i;

        for (i = 0; i < c->num_tc; i++)
                busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);

        if (c->xdp)
                busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);

        if (likely(budget)) { /* budget=0 means: don't poll rx rings */
                work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
                busy |= work_done == budget;
        }

        busy |= c->rq.post_wqes(&c->rq);

        if (busy) {
                if (likely(mlx5e_channel_no_affinity_change(c)))
                        return budget;
                if (budget && work_done == budget)
                        work_done--;
        }

        if (unlikely(!napi_complete_done(napi, work_done)))
                return work_done;

        for (i = 0; i < c->num_tc; i++) {
                mlx5e_handle_tx_dim(&c->sq[i]);
                mlx5e_cq_arm(&c->sq[i].cq);
        }

        mlx5e_handle_rx_dim(&c->rq);

        mlx5e_cq_arm(&c->rq.cq);
        mlx5e_cq_arm(&c->icosq.cq);

        return work_done;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Amir Vadai | 123 | 48.24% | 1 | 5.88% |
Saeed Mahameed | 54 | 21.18% | 5 | 29.41% |
Tariq Toukan | 43 | 16.86% | 4 | 23.53% |
Andy Gospodarek | 11 | 4.31% | 2 | 11.76% |
Tal Gilboa | 10 | 3.92% | 1 | 5.88% |
Eric Dumazet | 8 | 3.14% | 1 | 5.88% |
Gil Rockah | 3 | 1.18% | 1 | 5.88% |
Jesper Dangaard Brouer | 2 | 0.78% | 1 | 5.88% |
Achiad Shochat | 1 | 0.39% | 1 | 5.88% |
Total | 255 | 100.00% | 17 | 100.00% |
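mlx5e_napi_poll() follows the standard NAPI contract: while any of the channel's queues is still busy (and the IRQ affinity has not changed) it returns the full budget so the kernel keeps polling; otherwise it calls napi_complete_done() and re-arms every CQ so the next hardware interrupt restarts the cycle. A sketch of how such a poll routine is registered with the stack (the wrapper is illustrative; netif_napi_add() and napi_enable() are the standard kernel calls):

static void example_channel_enable_napi(struct mlx5e_channel *c,
                                        struct net_device *netdev)
{
        /* NAPI_POLL_WEIGHT (64) becomes the RX budget passed to the poll
         * routine on each invocation.
         */
        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, NAPI_POLL_WEIGHT);
        napi_enable(&c->napi);
}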
void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
        struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

        cq->event_ctr++;
        napi_schedule(cq->napi);
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Amir Vadai | 32 | 86.49% | 1 | 50.00% |
Gil Rockah | 5 | 13.51% | 1 | 50.00% |
Total | 37 | 100.00% | 2 | 100.00% |
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
        struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
        struct mlx5e_channel *c = cq->channel;
        struct net_device *netdev = c->netdev;

        netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
                   __func__, mcq->cqn, event);
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Amir Vadai | 61 | 98.39% | 1 | 50.00% |
Saeed Mahameed | 1 | 1.61% | 1 | 50.00% |
Total | 62 | 100.00% | 2 | 100.00% |
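These two functions are the per-CQ callbacks invoked by the mlx5 core from EQ context: a completion event bumps event_ctr (the value sampled by the DIM handlers above) and schedules the channel's NAPI, while a CQ error event is only logged. A sketch of how they are typically installed on the core CQ object (the helper is illustrative; the comp/event pointers of struct mlx5_core_cq are the hook points):

static void example_cq_set_handlers(struct mlx5e_cq *cq)
{
        struct mlx5_core_cq *mcq = &cq->mcq;

        mcq->comp  = mlx5e_completion_event;  /* hardware completion -> napi_schedule() */
        mcq->event = mlx5e_cq_error_event;    /* asynchronous CQ error -> netdev_err()  */
}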
Overall Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Amir Vadai | 220 | 39.57% | 1 | 5.00% |
Tal Gilboa | 122 | 21.94% | 1 | 5.00% |
Tariq Toukan | 99 | 17.81% | 4 | 20.00% |
Saeed Mahameed | 55 | 9.89% | 6 | 30.00% |
Eran Ben Elisha | 22 | 3.96% | 1 | 5.00% |
Andy Gospodarek | 11 | 1.98% | 2 | 10.00% |
Gal Pressman | 8 | 1.44% | 1 | 5.00% |
Eric Dumazet | 8 | 1.44% | 1 | 5.00% |
Gil Rockah | 8 | 1.44% | 1 | 5.00% |
Jesper Dangaard Brouer | 2 | 0.36% | 1 | 5.00% |
Achiad Shochat | 1 | 0.18% | 1 | 5.00% |
Total | 556 | 100.00% | 20 | 100.00% |