cregit-Linux: how code gets into the kernel

Release 4.7 drivers/infiniband/hw/hfi1/qp.c

/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"


unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);


static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Mike Marciniszyn          30   90.91%        1       50.00%
Dennis Dalessandro         3    9.09%        1       50.00%
Total                     33  100.00%        2      100.00%

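The helper above recovers a QPN from the allocator's bitmap coordinates: the page index (map - qpt->map) times the bits per page, plus the bit offset. Below is a standalone user-space sketch of the same arithmetic; BITS_PER_PAGE is a stand-in for RVT_BITS_PER_PAGE (PAGE_SIZE * 8, i.e. 32768 on 4 KiB-page systems), an assumption made only for this demo.

#include <stdio.h>

#define BITS_PER_PAGE 32768	/* illustrative stand-in for RVT_BITS_PER_PAGE */

/* Mirrors mk_qpn(): page_index plays the role of (map - qpt->map). */
static unsigned mk_qpn_demo(unsigned page_index, unsigned off)
{
	return page_index * BITS_PER_PAGE + off;
}

int main(void)
{
	/* Bit 5 of bitmap page 1 maps to QPN 32768 + 5 = 32773. */
	printf("%u\n", mk_qpn_demo(1, 5));
	return 0;
}
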
/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};

static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        62   79.49%        2       66.67%
Mike Marciniszyn          16   20.51%        1       33.33%
Total                     78  100.00%        3      100.00%


static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        70   69.31%        2       66.67%
Mike Marciniszyn          31   30.69%        1       33.33%
Total                    101  100.00%        3      100.00%


static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:
		return 8192;
	case OPA_MTU_10240:
		return 10240;
	default:
		return -1;
	}
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        32   96.97%        2       66.67%
Mike Marciniszyn           1    3.03%        1       33.33%
Total                     33  100.00%        3      100.00%

/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val;

	/* Constraining 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = OPA_MTU_8192;
	val = opa_mtu_enum_to_int((int)mtu);
	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        39   66.10%        2       50.00%
Sebastian Sanchez         19   32.20%        1       25.00%
Mike Marciniszyn           1    1.69%        1       25.00%
Total                     59  100.00%        4      100.00%

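Taken together, the two routines above map an MTU enum to a byte count, clamping OPA's 10 KB value down to 8 KB before falling back to the standard IB conversion. Here is a user-space sketch of the combined mapping; the enum values (IB_MTU_256 = 1 through IB_MTU_4096 = 5, with OPA extensions 6 and 7) follow the common IB/OPA definitions but are hard-coded as assumptions of this demo, and the driver itself delegates the IB cases to ib_mtu_enum_to_int().

#include <stdio.h>

enum {
	IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024, IB_MTU_2048, IB_MTU_4096,
	OPA_MTU_8192, OPA_MTU_10240	/* assumed OPA extensions: 6 and 7 */
};

static int demo_mtu_enum_to_int(int mtu)
{
	/* Constrain 10KB packets to 8KB, as verbs_mtu_enum_to_int() does. */
	if (mtu == OPA_MTU_10240)
		mtu = OPA_MTU_8192;
	switch (mtu) {
	case OPA_MTU_8192:	return 8192;
	case IB_MTU_4096:	return 4096;
	case IB_MTU_2048:	return 2048;
	case IB_MTU_1024:	return 1024;
	case IB_MTU_512:	return 512;
	case IB_MTU_256:	return 256;
	default:		return -1;
	}
}

int main(void)
{
	printf("%d\n", demo_mtu_enum_to_int(OPA_MTU_10240));	/* prints 8192 */
	return 0;
}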

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;
		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;
		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;
		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;
		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro       133   68.91%        2       40.00%
Jubin John                28   14.51%        1       20.00%
Ira Weiny                 20   10.36%        1       20.00%
Mike Marciniszyn          12    6.22%        1       20.00%
Total                    193  100.00%        5      100.00%


void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= RVT_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro       110   68.75%        3       60.00%
Mike Marciniszyn          50   31.25%        2       40.00%
Total                    160  100.00%        5      100.00%

/**
 * hfi1_check_send_wqe - validate wqe
 * @qp - The qp
 * @wqe - The built wqe
 *
 * validate wqe.  This is called
 * prior to inserting the wqe into
 * the ring but after the wqe has been
 * setup.
 *
 * Returns 0 on success, -EINVAL on failure
 *
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
			struct rvt_swqe *wqe)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[ah->attr.sl] == 0xf)
			return -EINVAL;
	default:
		break;
	}
	return wqe->length <= piothreshold;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Mike Marciniszyn          97   59.51%        2       66.67%
Ira Weiny                 66   40.49%        1       33.33%
Total                    163  100.00%        3      100.00%

/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & HFI1_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * There is a small chance that the pair of reads are
		 * not atomic, which is OK, since the fuzziness is
		 * resolved as further ACKs go out.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits) {
				max = x;
			} else {
				if (min == x)
					break;
				min = x;
			}
		}
		aeth |= x << HFI1_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro       180   86.54%        3       60.00%
Mike Marciniszyn          23   11.06%        1       20.00%
Jubin John                 5    2.40%        1       20.00%
Total                    208  100.00%        5      100.00%

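The binary search above picks the credit code whose table entry best represents the free RWQE count, rounding down to a representable value when there is no exact match. A minimal user-space sketch of just that search, using a copy of credit_table (values taken verbatim from the array above):

#include <stdio.h>

static const unsigned credit_table_demo[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};

/* Same loop as hfi1_compute_aeth(): binary-search for the credit code. */
static unsigned credits_to_code(unsigned credits)
{
	unsigned min = 0, max = 31, x;

	for (;;) {
		x = (min + max) / 2;
		if (credit_table_demo[x] == credits)
			break;
		if (credit_table_demo[x] > credits) {
			max = x;
		} else {
			if (min == x)
				break;
			min = x;
		}
	}
	return x;
}

int main(void)
{
	/* 100 free RWQEs rounds down to code 0xD (96 credits). */
	printf("0x%X\n", credits_to_code(100));
	return 0;
}
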
/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->node)));
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Mike Marciniszyn          93  100.00%        1      100.00%
Total                     93  100.00%        1      100.00%


static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev;
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;
	dev = to_idev(qp->ibqp.device);
	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&dev->iowait_lock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&dev->iowait_lock);
	}
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Mike Marciniszyn         114  100.00%        1      100.00%
Total                    114  100.00%        1      100.00%

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress and caller should hold
 * the s_lock.
 */
void hfi1_schedule_send(struct rvt_qp *qp)
{
	if (hfi1_send_ok(qp))
		_hfi1_schedule_send(qp);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Mike Marciniszyn          22  100.00%        1      100.00%
Total                     22  100.00%        1      100.00%

/**
 * hfi1_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == HFI1_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
		if (cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	}
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro       123   84.83%        2       66.67%
Mike Marciniszyn          22   15.17%        1       33.33%
Total                    145  100.00%        3      100.00%

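For reference, a standalone sketch of the AETH field handling used above: a 5-bit credit code sits at bit 24 above a 24-bit MSN, and the new send limit (LSN) is the MSN plus the decoded credit count. The shift and mask constants are meant to mirror the driver's HFI1_AETH_CREDIT_SHIFT, HFI1_AETH_CREDIT_MASK and HFI1_MSN_MASK; treat the exact values as assumptions of this demo.

#include <stdio.h>
#include <stdint.h>

#define AETH_CREDIT_SHIFT 24	/* assumed: mirrors HFI1_AETH_CREDIT_SHIFT */
#define AETH_CREDIT_MASK  0x1F	/* assumed: mirrors HFI1_AETH_CREDIT_MASK */
#define MSN_MASK          0xFFFFFF	/* assumed: mirrors HFI1_MSN_MASK */

int main(void)
{
	uint32_t aeth = (0x0DU << AETH_CREDIT_SHIFT) | 0x000123;	/* code 0xD, MSN 0x123 */
	uint32_t code = (aeth >> AETH_CREDIT_SHIFT) & AETH_CREDIT_MASK;
	uint32_t msn  = aeth & MSN_MASK;

	/* Code 0xD decodes to 96 credits via credit_table, so the new LSN
	 * becomes MSN + 96, exactly what hfi1_get_credit() computes. */
	printf("code=0x%X msn=0x%X new_lsn=0x%X\n",
	       code, msn, (msn + 96) & MSN_MASK);
	return 0;
}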

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        83   97.65%        2       66.67%
Mike Marciniszyn           2    2.35%        1       33.33%
Total                     85  100.00%        3      100.00%


static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro       255   84.44%        2       66.67%
Mike Marciniszyn          47   15.56%        1       33.33%
Total                    302  100.00%        3      100.00%


static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        31   81.58%        2       66.67%
Mike Marciniszyn           7   18.42%        1       33.33%
Total                     38  100.00%        3      100.00%


static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	/*
	 * This happens when the send engine notes
	 * a QP in the error state and cannot
	 * do the flush work until that QP's
	 * sdma work has finished.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Mike Marciniszyn          68  100.00%        3      100.00%
Total                     68  100.00%        3      100.00%

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        79   89.77%        2       66.67%
Mike Marciniszyn           9   10.23%        1       33.33%
Total                     88  100.00%        3      100.00%

/*
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets to VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}
	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Jubin John                73  100.00%        1      100.00%
Total                     73  100.00%        1      100.00%

struct qp_iter {
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	int specials;
	int n;
};

struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
	if (qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        74   94.87%        3       75.00%
Mike Marciniszyn           4    5.13%        1       25.00%
Total                     78  100.00%        4      100.00%


int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as an additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 *
	 */
	for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->rdi.ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->rvp.qp[0]);
				else
					qp = rcu_dereference(ibp->rvp.qp[1]);
			} else {
				qp = rcu_dereference(
					dev->rdi.qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro       209   90.09%        2       66.67%
Mike Marciniszyn          23    9.91%        1       33.33%
Total                    232  100.00%        3      100.00%

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        32   72.73%        2       66.67%
Mike Marciniszyn          12   27.27%        1       33.33%
Total                     44  100.00%        3      100.00%


void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn,
		   qp->s_next_psn,
		   qp->s_sending_psn,
		   qp->s_sending_hpsn,
		   qp->s_last,
		   qp->s_acked,
		   qp->s_cur,
		   qp->s_tail,
		   qp->s_head,
		   qp->s_size,
		   qp->s_avail,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid,
		   qp->remote_ah_attr.sl,
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
		   qp->pid);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro       207   64.89%        3       25.00%
Mike Marciniszyn          60   18.81%        6       50.00%
Vennila Megavannan        26    8.15%        1        8.33%
Jubin John                26    8.15%        2       16.67%
Total                    319  100.00%       12      100.00%


void qp_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        45   61.64%        2       66.67%
Mike Marciniszyn          28   38.36%        1       33.33%
Total                     73  100.00%        3      100.00%


void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		    gfp_t gfp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_hdr = kzalloc_node(sizeof(*priv->s_hdr), gfp,
				   rdi->dparms.node);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
	qp->s_timer.function = hfi1_rc_timeout;
	return priv;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Mike Marciniszyn          70   53.03%        3       50.00%
Dennis Dalessandro        48   36.36%        2       33.33%
Mitko Haralanov           14   10.61%        1       16.67%
Total                    132  100.00%        6      100.00%


void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        25   69.44%        1       50.00%
Mike Marciniszyn          11   30.56%        1       50.00%
Total                     36  100.00%        2      100.00%


unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        68   55.28%        2       66.67%
Mike Marciniszyn          55   44.72%        1       33.33%
Total                    123  100.00%        3      100.00%


void flush_qp_waiters(struct rvt_qp *qp)
{
	flush_iowait(qp);
	hfi1_stop_rc_timers(qp);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Mike Marciniszyn          13   65.00%        2       50.00%
Dennis Dalessandro         7   35.00%        2       50.00%
Total                     20  100.00%        4      100.00%


void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_iowait.iowork);
	hfi1_del_timers_sync(qp);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        20   58.82%        1       33.33%
Mike Marciniszyn          14   41.18%        2       66.67%
Total                     34  100.00%        3      100.00%


void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        20   54.05%        3       60.00%
Mike Marciniszyn          17   45.95%        2       40.00%
Total                     37  100.00%        5      100.00%


void notify_qp_reset(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		iowait_sleep,
		iowait_wakeup,
		iowait_sdma_drained);
	priv->r_adefered = 0;
	clear_ahg(qp);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        34   70.83%        2       50.00%
Mike Marciniszyn          14   29.17%        2       50.00%
Total                     48  100.00%        4      100.00%

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro       116   85.93%        3       75.00%
Mike Marciniszyn          19   14.07%        1       25.00%
Total                    135  100.00%        4      100.00%


int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro        16  100.00%        1      100.00%
Total                     16  100.00%        1      100.00%


u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);

	return mtu;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro       139  100.00%        2      100.00%
Total                    139  100.00%        2      100.00%


int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro       112   91.80%        2       66.67%
Mike Marciniszyn          10    8.20%        1       33.33%
Total                    122  100.00%        3      100.00%


void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->s_iowait.list);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro       103   67.32%        4       57.14%
Mike Marciniszyn          50   32.68%        3       42.86%
Total                    153  100.00%        7      100.00%

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state.  It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct rvt_qp *qp = NULL;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
	int n;
	int lastwqe;
	struct ib_event ev;

	rcu_read_lock();

	/* Deal only with RC/UC qps that use the given SL. */
	for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
		for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next)) {
			if (qp->port_num == ppd->port &&
			    (qp->ibqp.qp_type == IB_QPT_UC ||
			     qp->ibqp.qp_type == IB_QPT_RC) &&
			    qp->remote_ah_attr.sl == sl &&
			    (ib_rvt_state_ops[qp->state] &
			     RVT_POST_SEND_OK)) {
				spin_lock_irq(&qp->r_lock);
				spin_lock(&qp->s_hlock);
				spin_lock(&qp->s_lock);
				lastwqe = rvt_error_qp(qp,
						       IB_WC_WR_FLUSH_ERR);
				spin_unlock(&qp->s_lock);
				spin_unlock(&qp->s_hlock);
				spin_unlock_irq(&qp->r_lock);
				if (lastwqe) {
					ev.device = qp->ibqp.device;
					ev.element.qp = &qp->ibqp;
					ev.event =
						IB_EVENT_QP_LAST_WQE_REACHED;
					qp->ibqp.event_handler(&ev,
							       qp->ibqp.qp_context);
				}
			}
		}
	}

	rcu_read_unlock();
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Kaike Wan                266  100.00%        1      100.00%
Total                    266  100.00%        1      100.00%


Overall Contributors

Person                Tokens     Prop  Commits  Commit Prop
Dennis Dalessandro      2526   58.18%        6       17.65%
Mike Marciniszyn        1269   29.23%       17       50.00%
Kaike Wan                267    6.15%        1        2.94%
Jubin John               134    3.09%        5       14.71%
Ira Weiny                 87    2.00%        2        5.88%
Vennila Megavannan        26    0.60%        1        2.94%
Sebastian Sanchez         19    0.44%        1        2.94%
Mitko Haralanov           14    0.32%        1        2.94%
Total                   4342  100.00%       34      100.00%