cregit-Linux how code gets into the kernel

Release 4.18 drivers/infiniband/core/cq.c

/*
 * Copyright (c) 2015 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* # of WCs to poll for with a single call to ib_poll_cq */

#define IB_POLL_BATCH			16

#define IB_POLL_BATCH_DIRECT		8

/* # of WCs to iterate over before yielding */

#define IB_POLL_BUDGET_IRQ		256

#define IB_POLL_BUDGET_WORKQUEUE	65536


#define IB_POLL_FLAGS \
	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)


/*
 * __ib_process_cq - poll a CQ and run the done() callback for each completion
 * @cq:		completion queue to poll
 * @budget:	maximum number of completions to process, or -1 for no bound
 * @wcs:	caller-provided work-completion array to poll into
 * @batch:	number of entries in @wcs, i.e. the per-ib_poll_cq() batch size
 *
 * Returns the number of completions processed.  The loop stops early once
 * ib_poll_cq() returns a short batch (the CQ is drained) or the budget is
 * exhausted.  Completions without a wr_cqe are expected to be flush errors;
 * a successful completion without one indicates a ULP bug (WARN_ON_ONCE).
 */
static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, int batch)
{
	int i, n, completed = 0;

	/*
	 * budget might be (-1) if the caller does not
	 * want to bound this call, thus we need unsigned
	 * minimum here.
	 */
	while ((n = ib_poll_cq(cq, min_t(u32, batch, budget - completed), wcs)) > 0) {
		for (i = 0; i < n; i++) {
			struct ib_wc *wc = &wcs[i];

			if (wc->wr_cqe)
				wc->wr_cqe->done(cq, wc);
			else
				WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
		}

		completed += n;

		/*
		 * A short batch means the CQ is empty; otherwise keep going
		 * until the (finite) budget is consumed.
		 */
		if (n != batch || (budget != -1 && completed >= budget))
			break;
	}

	return completed;
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig11684.06%125.00%
Sagi Grimberg1611.59%250.00%
Max Gurtovoy64.35%125.00%
Total138100.00%4100.00%

/** * ib_process_cq_direct - process a CQ in caller context * @cq: CQ to process * @budget: number of CQEs to poll for * * This function is used to process all outstanding CQ entries. * It does not offload CQ processing to a different context and does * not ask for completion interrupts from the HCA. * Using direct processing on CQ with non IB_POLL_DIRECT type may trigger * concurrent processing. * * Note: do not pass -1 as %budget unless it is guaranteed that the number * of completions that will be processed is small. */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
	/* Small on-stack batch buffer; direct polling runs in caller context. */
	struct ib_wc wc_batch[IB_POLL_BATCH_DIRECT];

	return __ib_process_cq(cq, budget, wc_batch, IB_POLL_BATCH_DIRECT);
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig2165.62%133.33%
Sagi Grimberg825.00%133.33%
Max Gurtovoy39.38%133.33%
Total32100.00%3100.00%

EXPORT_SYMBOL(ib_process_cq_direct);
/*
 * Completion handler for IB_POLL_DIRECT CQs.  Direct CQs never request
 * notification, so any callback here is unsolicited and indicates a bug.
 */
static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig24100.00%1100.00%
Total24100.00%1100.00%


/*
 * ib_poll_handler - irq_poll (softirq) callback for IB_POLL_SOFTIRQ CQs
 * @iop:	irq_poll instance embedded in the CQ
 * @budget:	maximum completions to process in this softirq pass
 *
 * Returns the number of completions processed.  If the budget was not
 * exhausted the CQ is drained: polling is completed and notification is
 * re-armed.  ib_req_notify_cq() returning > 0 means completions slipped
 * in before the re-arm, so polling is rescheduled to avoid losing them.
 */
static int ib_poll_handler(struct irq_poll *iop, int budget)
{
	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
	int completed;

	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
	if (completed < budget) {
		/* Must complete polling before re-arming the notification. */
		irq_poll_complete(&cq->iop);
		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
			irq_poll_sched(&cq->iop);
	}

	return completed;
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig7992.94%133.33%
Max Gurtovoy55.88%133.33%
Sagi Grimberg11.18%133.33%
Total85100.00%3100.00%


/*
 * Completion handler for IB_POLL_SOFTIRQ CQs: hand the CQ to the
 * irq_poll machinery, which will invoke ib_poll_handler() in softirq
 * context.
 */
static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
	irq_poll_sched(&cq->iop);
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig23100.00%1100.00%
Total23100.00%1100.00%


/*
 * ib_cq_poll_work - workqueue callback for IB_POLL_WORKQUEUE CQs
 * @work:	work item embedded in the CQ
 *
 * Processes up to IB_POLL_BUDGET_WORKQUEUE completions, then yields.
 * The work is re-queued either because the budget was exhausted (more
 * completions may be pending) or because ib_req_notify_cq() reported
 * completions that arrived before the notification was re-armed.
 */
static void ib_cq_poll_work(struct work_struct *work)
{
	struct ib_cq *cq = container_of(work, struct ib_cq, work);
	int completed;

	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, IB_POLL_BATCH);
	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
		queue_work(ib_comp_wq, &cq->work);
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig6391.30%133.33%
Max Gurtovoy57.25%133.33%
Sagi Grimberg11.45%133.33%
Total69100.00%3100.00%


/*
 * Completion handler for IB_POLL_WORKQUEUE CQs: defer all CQ processing
 * to ib_cq_poll_work() on the shared RDMA completion workqueue.
 */
static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
	queue_work(ib_comp_wq, &cq->work);
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig25100.00%1100.00%
Total25100.00%1100.00%

/** * __ib_alloc_cq - allocate a completion queue * @dev: device to allocate the CQ for * @private: driver private data, accessible from cq->cq_context * @nr_cqe: number of CQEs to allocate * @comp_vector: HCA completion vectors for this CQ * @poll_ctx: context to poll the CQ from. * @caller: module owner name. * * This is the proper interface to allocate a CQ for in-kernel users. A * CQ allocated with this interface will automatically be polled from the * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id * to use this CQ abstraction. */
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
			    int nr_cqe, int comp_vector,
			    enum ib_poll_context poll_ctx, const char *caller)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe		= nr_cqe,
		.comp_vector	= comp_vector,
	};
	struct ib_cq *cq;
	int ret = -ENOMEM;

	cq = dev->create_cq(dev, &cq_attr, NULL, NULL);
	if (IS_ERR(cq))
		return cq;

	/* Kernel-owned CQ: no user object, no async event handler. */
	cq->device = dev;
	cq->uobject = NULL;
	cq->event_handler = NULL;
	cq->cq_context = private;
	cq->poll_ctx = poll_ctx;
	atomic_set(&cq->usecnt, 0);

	/* Shared WC batch buffer used by the softirq/workqueue pollers. */
	cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
	if (!cq->wc)
		goto out_destroy_cq;

	/* Register with resource tracking under the caller's module name. */
	cq->res.type = RDMA_RESTRACK_CQ;
	cq->res.kern_name = caller;
	rdma_restrack_add(&cq->res);

	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		cq->comp_handler = ib_cq_completion_direct;
		break;
	case IB_POLL_SOFTIRQ:
		cq->comp_handler = ib_cq_completion_softirq;
		irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
		/* Arm notification only after the poll context is ready. */
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	case IB_POLL_WORKQUEUE:
		cq->comp_handler = ib_cq_completion_workqueue;
		INIT_WORK(&cq->work, ib_cq_poll_work);
		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
		break;
	default:
		ret = -EINVAL;
		goto out_free_wc;
	}

	return cq;

	/* Unwind in reverse order of setup. */
out_free_wc:
	kfree(cq->wc);
	rdma_restrack_del(&cq->res);
out_destroy_cq:
	cq->device->destroy_cq(cq);
	return ERR_PTR(ret);
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig25987.21%133.33%
Leon Romanovsky3812.79%266.67%
Total297100.00%3100.00%

EXPORT_SYMBOL(__ib_alloc_cq); /** * ib_free_cq - free a completion queue * @cq: completion queue to free. */
void ib_free_cq(struct ib_cq *cq)
{
	int rc;

	/* A CQ that still has users (e.g. attached QPs) must not be freed. */
	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;

	/* Quiesce the polling context before tearing anything down. */
	switch (cq->poll_ctx) {
	case IB_POLL_DIRECT:
		break;
	case IB_POLL_SOFTIRQ:
		irq_poll_disable(&cq->iop);
		break;
	case IB_POLL_WORKQUEUE:
		cancel_work_sync(&cq->work);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	kfree(cq->wc);
	rdma_restrack_del(&cq->res);
	rc = cq->device->destroy_cq(cq);
	WARN_ON_ONCE(rc);
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig9191.00%133.33%
Leon Romanovsky88.00%133.33%
Sagi Grimberg11.00%133.33%
Total100100.00%3100.00%

EXPORT_SYMBOL(ib_free_cq);

Overall Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig74888.31%114.29%
Leon Romanovsky485.67%228.57%
Sagi Grimberg283.31%342.86%
Max Gurtovoy232.72%114.29%
Total847100.00%7100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.