cregit-Linux: how code gets into the kernel

Release 4.8: drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"


/* Room reserved for the NOPs used to pad the SQ up to its edge. */
#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS

/* Stop the txq when fewer WQEBBs than this remain: one maximal WQE
 * plus the NOP room above is then always guaranteed to fit.
 */
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
                            MLX5E_SQ_NOPS_ROOM)


void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
{
        struct mlx5_wq_cyc *wq = &sq->wq;

        u16 pi = sq->pc & wq->sz_m1;
        struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

        memset(cseg, 0, sizeof(*cseg));

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);

        sq->skb[pi] = NULL;
        sq->pc++;
        sq->stats.nop++;

        if (notify_hw) {
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
                mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
        }
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
saeed mahameed         |    136 |  91.89% |       1 |     25.00%
tariq toukan           |     10 |   6.76% |       2 |     50.00%
achiad shochat         |      2 |   1.35% |       1 |     25.00%
Total                  |    148 | 100.00% |       4 |    100.00%
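
A note on the indexing above: the cyclic work queue size is a power of two, so a free-running 16-bit producer counter (sq->pc) is simply masked with wq->sz_m1 to land on a ring slot. Below is a minimal standalone sketch of the same idiom; the ring names are illustrative, not the driver's:

#include <assert.h>
#include <stdint.h>

#define RING_LOG_SZ 3                     /* ring size = 8 entries */
#define RING_SZ     (1u << RING_LOG_SZ)
#define RING_MASK   (RING_SZ - 1)         /* plays the role of wq->sz_m1 */

int main(void)
{
        uint16_t pc = 0;                  /* free-running producer counter */
        int n;

        /* Post 20 entries: the counter happily exceeds the ring size,
         * but the masked value always lands on a valid slot, exactly
         * like pi = sq->pc & wq->sz_m1 in mlx5e_send_nop().
         */
        for (n = 0; n < 20; n++) {
                uint16_t pi = pc & RING_MASK;

                assert(pi < RING_SZ);
                pc++;
        }
        return 0;
}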


static inline void mlx5e_tx_dma_unmap(struct device *pdev,
                                      struct mlx5e_sq_dma *dma)
{
        switch (dma->type) {
        case MLX5E_DMA_MAP_SINGLE:
                dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        case MLX5E_DMA_MAP_PAGE:
                dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        default:
                WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
        }
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
achiad shochat         |     40 |  56.34% |       1 |     50.00%
amir vadai             |     31 |  43.66% |       1 |     50.00%
Total                  |     71 | 100.00% |       2 |    100.00%


static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
                                  dma_addr_t addr,
                                  u32 size,
                                  enum mlx5e_dma_map_type map_type)
{
        sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
        sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
        sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
        sq->dma_fifo_pc++;
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
amir vadai             |     57 |  73.08% |       1 |     50.00%
achiad shochat         |     21 |  26.92% |       1 |     50.00%
Total                  |     78 | 100.00% |       2 |    100.00%


static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
{
        return &sq->dma_fifo[i & sq->dma_fifo_mask];
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
amir vadai             |     23 |  76.67% |       1 |     50.00%
achiad shochat         |      7 |  23.33% |       1 |     50.00%
Total                  |     30 | 100.00% |       2 |    100.00%


static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
{
        int i;

        for (i = 0; i < num_dma; i++) {
                struct mlx5e_sq_dma *last_pushed_dma =
                        mlx5e_dma_get(sq, --sq->dma_fifo_pc);

                mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
        }
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
achiad shochat         |     50 |  89.29% |       2 |     66.67%
amir vadai             |      6 |  10.71% |       1 |     33.33%
Total                  |     56 | 100.00% |       3 |    100.00%
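
Taken together, mlx5e_dma_push(), mlx5e_dma_get() and mlx5e_dma_unmap_wqe_err() form a small mapping fifo: a producer counter records each DMA mapping, a consumer counter (advanced at completion time) releases them in order, and the error path walks the producer counter backwards to undo only the mappings pushed for the failed WQE. A compact userspace sketch of that counter discipline, with made-up names:

#include <stdint.h>
#include <stdio.h>

#define FIFO_SZ   8
#define FIFO_MASK (FIFO_SZ - 1)

struct dma_rec {
        uint64_t addr;
        uint32_t size;
};

static struct dma_rec fifo[FIFO_SZ];
static uint32_t fifo_pc;        /* producer counter (like dma_fifo_pc) */
static uint32_t fifo_cc;        /* consumer counter (like dma_fifo_cc) */

static void push(uint64_t addr, uint32_t size)
{
        fifo[fifo_pc & FIFO_MASK] = (struct dma_rec){ addr, size };
        fifo_pc++;
}

/* Completion path: release mappings oldest-first, as mlx5e_poll_tx_cq()
 * does by advancing dma_fifo_cc.
 */
static void complete(int num)
{
        int i;

        for (i = 0; i < num; i++) {
                struct dma_rec *r = &fifo[fifo_cc++ & FIFO_MASK];

                printf("unmap (FIFO) addr=0x%llx size=%u\n",
                       (unsigned long long)r->addr, r->size);
        }
}

/* Error path: undo only the newest mappings, newest-first, mirroring
 * mlx5e_dma_unmap_wqe_err()'s --sq->dma_fifo_pc loop.
 */
static void unwind(int num)
{
        int i;

        for (i = 0; i < num; i++) {
                struct dma_rec *r = &fifo[--fifo_pc & FIFO_MASK];

                printf("unmap (LIFO) addr=0x%llx size=%u\n",
                       (unsigned long long)r->addr, r->size);
        }
}

int main(void)
{
        push(0x1000, 64);
        push(0x2000, 128);
        complete(1);    /* releases 0x1000 first */
        push(0x3000, 32);
        unwind(1);      /* undoes only 0x3000 */
        return 0;
}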


u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       void *accel_priv, select_queue_fallback_t fallback)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
        int up = 0;

        if (!netdev_get_num_tc(dev))
                return channel_ix;

        if (skb_vlan_tag_present(skb))
                up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

        /* channel_ix can be larger than num_channels since
         * dev->num_real_tx_queues = num_channels * num_tc
         */
        if (channel_ix >= priv->params.num_channels)
                channel_ix = reciprocal_scale(channel_ix,
                                              priv->params.num_channels);

        return priv->channeltc_to_txq_map[channel_ix][up];
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
amir vadai             |     59 |  54.63% |       1 |     20.00%
rana shahout           |     40 |  37.04% |       2 |     40.00%
saeed mahameed         |      9 |   8.33% |       2 |     40.00%
Total                  |    108 | 100.00% |       5 |    100.00%
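
The clamp above relies on the kernel's reciprocal_scale() from <linux/kernel.h>, which maps a 32-bit value into [0, N) with a multiply and a shift rather than a modulo. A self-contained sketch of the same arithmetic:

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as the kernel's reciprocal_scale(): scale a value
 * from [0, 2^32) down to [0, ep_ro) without a division.
 */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
        uint32_t num_channels = 8;
        uint32_t ix;

        /* Whatever index the fallback picked, the result is < num_channels. */
        for (ix = 0; ix < 64; ix++)
                assert(reciprocal_scale(ix * 0x10000000u, num_channels) <
                       num_channels);
        return 0;
}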


static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

        return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
hadar hen zion         |     27 | 100.00% |       1 |    100.00%
Total                  |     27 | 100.00% |       1 |    100.00%


static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
        struct flow_keys keys;

        if (skb_transport_header_was_set(skb))
                return skb_transport_offset(skb);
        else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
                return keys.control.thoff;
        else
                return mlx5e_skb_l2_header_offset(skb);
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
hadar hen zion         |     56 | 100.00% |       1 |    100.00%
Total                  |     56 | 100.00% |       1 |    100.00%


static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
                                                 struct sk_buff *skb)
{
        int hlen;

        switch (mode) {
        case MLX5_INLINE_MODE_TCP_UDP:
                hlen = eth_get_headlen(skb->data, skb_headlen(skb));
                if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
                        hlen += VLAN_HLEN;
                return hlen;
        case MLX5_INLINE_MODE_IP:
                /* When transport header is set to zero, it means no transport
                 * header. When transport header is set to 0xff's, it means
                 * transport header wasn't set.
                 */
                if (skb_transport_offset(skb))
                        return mlx5e_skb_l3_header_offset(skb);
                /* fall through */
        case MLX5_INLINE_MODE_L2:
        default:
                return mlx5e_skb_l2_header_offset(skb);
        }
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
hadar hen zion         |     90 | 100.00% |       1 |    100.00%
Total                  |     90 | 100.00% |       1 |    100.00%


static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
                                            struct sk_buff *skb, bool bf)
{
        /* Some NIC TX decisions, e.g loopback, are based on the packet
         * headers and occur before the data gather.
         * Therefore these headers must be copied into the WQE
         */
        if (bf) {
                u16 ihs = skb_headlen(skb);

                if (skb_vlan_tag_present(skb))
                        ihs += VLAN_HLEN;

                if (ihs <= sq->max_inline)
                        return skb_headlen(skb);
        }

        return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
achiad shochat         |     22 |  31.43% |       2 |     33.33%
saeed mahameed         |     21 |  30.00% |       1 |     16.67%
amir vadai             |     19 |  27.14% |       1 |     16.67%
hadar hen zion         |      5 |   7.14% |       1 |     16.67%
matthew finlay         |      3 |   4.29% |       1 |     16.67%
Total                  |     70 | 100.00% |       6 |    100.00%


static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
                                            unsigned int *skb_len,
                                            unsigned int len)
{
        *skb_len  -= len;
        *skb_data += len;
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
achiad shochat         |     32 | 100.00% |       1 |    100.00%
Total                  |     32 | 100.00% |       1 |    100.00%


static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb,
                                     u16 ihs, unsigned char **skb_data,
                                     unsigned int *skb_len)
{
        struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
        int cpy1_sz = 2 * ETH_ALEN;
        int cpy2_sz = ihs - cpy1_sz;

        memcpy(vhdr, *skb_data, cpy1_sz);
        mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
        vhdr->h_vlan_proto = skb->vlan_proto;
        vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
        memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
        mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
achiad shochat         |    117 | 100.00% |       2 |    100.00%
Total                  |    117 | 100.00% |       2 |    100.00%
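
mlx5e_insert_vlan() splices a 4-byte 802.1Q tag between the MAC addresses and the original EtherType while building the inline header. Below is a hedged standalone sketch of the same byte layout; the struct and helper names are illustrative stand-ins for the kernel's struct vlan_ethhdr and the driver helper:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* Field order matches struct vlan_ethhdr from <linux/if_vlan.h>. */
struct vlan_eth_hdr {
        uint8_t  h_dest[6];
        uint8_t  h_source[6];
        uint16_t h_vlan_proto;              /* 0x8100, network byte order */
        uint16_t h_vlan_TCI;                /* priority / CFI / VLAN id */
        uint16_t h_vlan_encapsulated_proto; /* the original EtherType */
};

/* Splice a VLAN tag into a copy of an untagged header block: copy the
 * 12 bytes of MAC addresses, write the 4-byte tag, then copy everything
 * from the original EtherType onward - the same two-memcpy dance as
 * mlx5e_insert_vlan(). hdr_len is the number of header bytes to inline.
 */
static void insert_vlan(uint8_t *dst, const uint8_t *frame, size_t hdr_len,
                        uint16_t tci)
{
        struct vlan_eth_hdr *vhdr = (struct vlan_eth_hdr *)dst;
        size_t cpy1_sz = 12;                /* 2 * ETH_ALEN */
        size_t cpy2_sz = hdr_len - cpy1_sz; /* EtherType + L3/L4 headers */

        memcpy(vhdr, frame, cpy1_sz);
        vhdr->h_vlan_proto = htons(0x8100);
        vhdr->h_vlan_TCI   = htons(tci);
        memcpy(&vhdr->h_vlan_encapsulated_proto, frame + cpy1_sz, cpy2_sz);
}

int main(void)
{
        uint8_t frame[34] = { 0 };          /* eth (14) + IPv4 (20) headers */
        uint8_t out[sizeof(frame) + 4];     /* room for the 4-byte tag */

        frame[12] = 0x08;                   /* EtherType 0x0800 (IPv4) */
        insert_vlan(out, frame, sizeof(frame), 10);
        return 0;
}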


static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
        struct mlx5_wq_cyc *wq = &sq->wq;

        u16 pi = sq->pc & wq->sz_m1;
        struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
        struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi];

        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
        struct mlx5_wqe_data_seg *dseg;

        unsigned char *skb_data = skb->data;
        unsigned int skb_len = skb->len;
        u8  opcode = MLX5_OPCODE_SEND;
        dma_addr_t dma_addr = 0;
        unsigned int num_bytes;
        bool bf = false;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;
        int i;

        memset(wqe, 0, sizeof(*wqe));

        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
                        sq->stats.csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                }
        } else
                sq->stats.csum_none++;

        if (sq->cc != sq->prev_cc) {
                sq->prev_cc = sq->cc;
                sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
        }

        if (skb_is_gso(skb)) {
                eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
                opcode    = MLX5_OPCODE_LSO;

                if (skb->encapsulation) {
                        ihs = skb_inner_transport_offset(skb) +
                              inner_tcp_hdrlen(skb);
                        sq->stats.tso_inner_packets++;
                        sq->stats.tso_inner_bytes += skb->len - ihs;
                } else {
                        ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
                        sq->stats.tso_packets++;
                        sq->stats.tso_bytes += skb->len - ihs;
                }

                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
        } else {
                bf = sq->bf_budget &&
                     !skb->xmit_more &&
                     !skb_shinfo(skb)->nr_frags;
                ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
        }

        wi->num_bytes = num_bytes;

        if (skb_vlan_tag_present(skb)) {
                mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
                                  &skb_len);
                ihs += VLAN_HLEN;
        } else {
                memcpy(eseg->inline_hdr_start, skb_data, ihs);
                mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
        }

        eseg->inline_hdr_sz = cpu_to_be16(ihs);

        ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
                               MLX5_SEND_WQE_DS);
        dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

        wi->num_dma = 0;

        headlen = skb_len - skb->data_len;
        if (headlen) {
                dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                                          DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(headlen);

                mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
                wi->num_dma++;

                dseg++;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);

                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                            DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(fsz);

                mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
                wi->num_dma++;

                dseg++;
        }

        ds_cnt += wi->num_dma;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

        sq->skb[pi] = skb;

        wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        sq->pc += wi->num_wqebbs;

        netdev_tx_sent_queue(sq->txq, wi->num_bytes);

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

        if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats.stopped++;
        }

        sq->stats.xmit_more += skb->xmit_more;
        if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
                int bf_sz = 0;

                if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
                        bf_sz = wi->num_wqebbs << 3;

                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
                mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
        }

        /* fill sq edge with nops to avoid wqe wrap around */
        while ((sq->pc & wq->sz_m1) > sq->edge)
                mlx5e_send_nop(sq, false);

        if (bf)
                sq->bf_budget--;

        sq->stats.packets++;
        sq->stats.bytes += num_bytes;
        return NETDEV_TX_OK;

dma_unmap_wqe_err:
        sq->stats.dropped++;
        mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
amir vadai             |    638 |  62.92% |       1 |      5.56%
achiad shochat         |    192 |  18.93% |       6 |     33.33%
matthew finlay         |     86 |   8.48% |       2 |     11.11%
saeed mahameed         |     31 |   3.06% |       2 |     11.11%
eran ben elisha        |     23 |   2.27% |       1 |      5.56%
gal pressman           |     20 |   1.97% |       2 |     11.11%
tariq toukan           |     13 |   1.28% |       2 |     11.11%
moshe lazer            |      7 |   0.69% |       1 |      5.56%
eli cohen              |      4 |   0.39% |       1 |      5.56%
Total                  |   1014 | 100.00% |      18 |    100.00%
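
One line in mlx5e_sq_xmit() worth unpacking is the LSO byte accounting, num_bytes = skb->len + (gso_segs - 1) * ihs: the skb carries the headers only once, but the hardware replicates them in every segment, so the counters are credited for the extra copies. A worked example with illustrative values:

#include <assert.h>

int main(void)
{
        unsigned int ihs      = 54;    /* eth + IPv4 + TCP headers (inlined) */
        unsigned int payload  = 64000; /* TCP payload handed down as one skb */
        unsigned int skb_len  = ihs + payload;
        unsigned int gso_size = 1448;  /* MSS */
        unsigned int gso_segs = (payload + gso_size - 1) / gso_size; /* 45 */
        unsigned int num_bytes;

        /* Wire bytes: every segment repeats the headers, so credit
         * (gso_segs - 1) extra header copies on top of skb->len.
         */
        num_bytes = skb_len + (gso_segs - 1) * ihs;

        assert(num_bytes == payload + gso_segs * ihs); /* 66430 bytes */
        return 0;
}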


netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];

        return mlx5e_sq_xmit(sq, skb);
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
amir vadai             |     45 |  93.75% |       1 |     50.00%
saeed mahameed         |      3 |   6.25% |       1 |     50.00%
Total                  |     48 | 100.00% |       2 |    100.00%


bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
        struct mlx5e_sq *sq;
        u32 dma_fifo_cc;
        u32 nbytes;
        u16 npkts;
        u16 sqcc;
        int i;

        sq = container_of(cq, struct mlx5e_sq, cq);

        if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
                return false;

        npkts = 0;
        nbytes = 0;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        /* avoid dirtying sq cache line every cqe */
        dma_fifo_cc = sq->dma_fifo_cc;

        for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
                struct mlx5_cqe64 *cqe;
                u16 wqe_counter;
                bool last_wqe;

                cqe = mlx5e_get_cqe(cq);
                if (!cqe)
                        break;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                do {
                        struct mlx5e_tx_wqe_info *wi;
                        struct sk_buff *skb;
                        u16 ci;
                        int j;

                        last_wqe = (sqcc == wqe_counter);

                        ci = sqcc & sq->wq.sz_m1;
                        skb = sq->skb[ci];
                        wi = &sq->wqe_info[ci];

                        if (unlikely(!skb)) { /* nop */
                                sqcc++;
                                continue;
                        }

                        if (unlikely(skb_shinfo(skb)->tx_flags &
                                     SKBTX_HW_TSTAMP)) {
                                struct skb_shared_hwtstamps hwts = {};

                                mlx5e_fill_hwstamp(sq->tstamp,
                                                   get_cqe_ts(cqe), &hwts);
                                skb_tstamp_tx(skb, &hwts);
                        }

                        for (j = 0; j < wi->num_dma; j++) {
                                struct mlx5e_sq_dma *dma =
                                        mlx5e_dma_get(sq, dma_fifo_cc++);

                                mlx5e_tx_dma_unmap(sq->pdev, dma);
                        }

                        npkts++;
                        nbytes += wi->num_bytes;
                        sqcc += wi->num_wqebbs;
                        napi_consume_skb(skb, napi_budget);
                } while (!last_wqe);
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);

        if (netif_tx_queue_stopped(sq->txq) &&
            mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
                netif_tx_wake_queue(sq->txq);
                sq->stats.wake++;
        }

        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
amir vadai             |    241 |  60.55% |       1 |      7.69%
achiad shochat         |     84 |  21.11% |       6 |     46.15%
eran ben elisha        |     45 |  11.31% |       1 |      7.69%
daniel jurgens         |     17 |   4.27% |       1 |      7.69%
jesper dangaard brouer |      6 |   1.51% |       1 |      7.69%
tariq toukan           |      3 |   0.75% |       1 |      7.69%
saeed mahameed         |      2 |   0.50% |       2 |     15.38%
Total                  |    398 | 100.00% |      13 |    100.00%
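
The return value of mlx5e_poll_tx_cq() encodes whether the poll budget was exhausted: true means MLX5E_TX_CQ_POLL_BUDGET CQEs were consumed and more completions may be pending, so the NAPI loop should poll again. A minimal sketch of that contract; all names below are hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define TX_CQ_POLL_BUDGET 8     /* illustrative stand-in for the driver's */

static int pending = 20;        /* pretend 20 completions are queued */

/* Hypothetical completion fetch: returns false once the CQ is drained. */
static bool fetch_one_completion(void)
{
        if (!pending)
                return false;
        pending--;
        return true;
}

/* true = budget exhausted, more work may remain, so re-poll. */
static bool poll_tx_cq(void)
{
        int i;

        for (i = 0; i < TX_CQ_POLL_BUDGET; i++)
                if (!fetch_one_completion())
                        break;

        return i == TX_CQ_POLL_BUDGET;
}

int main(void)
{
        while (poll_tx_cq())
                ;               /* 20 completions drain in 3 polls */
        printf("drained, pending=%d\n", pending);
        return 0;
}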


void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
{
        struct mlx5e_tx_wqe_info *wi;
        struct sk_buff *skb;
        u16 ci;
        int i;

        while (sq->cc != sq->pc) {
                ci = sq->cc & sq->wq.sz_m1;
                skb = sq->skb[ci];
                wi = &sq->wqe_info[ci];

                if (!skb) { /* nop */
                        sq->cc++;
                        continue;
                }

                for (i = 0; i < wi->num_dma; i++) {
                        struct mlx5e_sq_dma *dma =
                                mlx5e_dma_get(sq, sq->dma_fifo_cc++);

                        mlx5e_tx_dma_unmap(sq->pdev, dma);
                }

                dev_kfree_skb_any(skb);
                sq->cc += wi->num_wqebbs;
        }
}

Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
saeed mahameed         |    137 | 100.00% |       1 |    100.00%
Total                  |    137 | 100.00% |       1 |    100.00%


Overall Contributors

Person                 | Tokens |    Prop | Commits | CommitProp
amir vadai             |   1128 |  45.16% |       1 |      2.94%
achiad shochat         |    567 |  22.70% |      10 |     29.41%
saeed mahameed         |    347 |  13.89% |       6 |     17.65%
hadar hen zion         |    178 |   7.13% |       1 |      2.94%
matthew finlay         |     90 |   3.60% |       3 |      8.82%
eran ben elisha        |     68 |   2.72% |       1 |      2.94%
rana shahout           |     40 |   1.60% |       2 |      5.88%
tariq toukan           |     26 |   1.04% |       4 |     11.76%
gal pressman           |     20 |   0.80% |       2 |      5.88%
daniel jurgens         |     17 |   0.68% |       1 |      2.94%
moshe lazer            |      7 |   0.28% |       1 |      2.94%
jesper dangaard brouer |      6 |   0.24% |       1 |      2.94%
eli cohen              |      4 |   0.16% |       1 |      2.94%
Total                  |   2498 | 100.00% |      34 |    100.00%