Contributors: 8

| Author             | Tokens | Token Proportion | Commits | Commit Proportion |
|--------------------|--------|------------------|---------|-------------------|
| Moni Shoua         | 593    | 69.36%           | 1       | 10.00%            |
| Bob Pearson        | 111    | 12.98%           | 3       | 30.00%            |
| Andrew Boyer       | 89     | 10.41%           | 1       | 10.00%            |
| Jason Gunthorpe    | 23     | 2.69%            | 1       | 10.00%            |
| Allen Pais         | 13     | 1.52%            | 1       | 10.00%            |
| Bart Van Assche    | 13     | 1.52%            | 1       | 10.00%            |
| shamir rabinovitch | 9      | 1.05%            | 1       | 10.00%            |
| Zhu Yanjun         | 4      | 0.47%            | 1       | 10.00%            |
| Total              | 855    |                  | 10      |                   |
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
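
/*
 * rxe_cq_chk_attr() - sanity check a requested CQ size against the
 * device limit and, when resizing an existing CQ, against the number
 * of completions currently queued.
 */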
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
int cqe, int comp_vector)
{
int count;
if (cqe <= 0) {
pr_warn("cqe(%d) <= 0\n", cqe);
goto err1;
}
if (cqe > rxe->attr.max_cqe) {
pr_warn("cqe(%d) > max_cqe(%d)\n",
cqe, rxe->attr.max_cqe);
goto err1;
}
if (cq) {
if (cq->is_user)
count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
else
count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);
if (cqe < count) {
pr_warn("cqe(%d) < current # elements in queue (%d)",
cqe, count);
goto err1;
}
}
return 0;
err1:
return -EINVAL;
}
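
/*
 * Tasklet handler: invoke the consumer's completion handler unless the
 * CQ has been marked as dying.
 */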
static void rxe_send_complete(struct tasklet_struct *t)
{
struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
unsigned long flags;
spin_lock_irqsave(&cq->cq_lock, flags);
if (cq->is_dying) {
spin_unlock_irqrestore(&cq->cq_lock, flags);
return;
}
spin_unlock_irqrestore(&cq->cq_lock, flags);
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
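
/*
 * Allocate the CQ's circular queue, share it with userspace via mmap
 * info when uresp is supplied, and set up the completion tasklet and
 * spinlock.
 */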
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int comp_vector, struct ib_udata *udata,
struct rxe_create_cq_resp __user *uresp)
{
int err;
enum queue_type type;
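
	/* queue type depends on whether the CQ is consumed from userspace */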
type = uresp ? QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL;
cq->queue = rxe_queue_init(rxe, &cqe,
sizeof(struct rxe_cqe), type);
if (!cq->queue) {
pr_warn("unable to create cq\n");
return -ENOMEM;
}
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
if (err) {
vfree(cq->queue->buf);
kfree(cq->queue);
return err;
}
if (uresp)
cq->is_user = 1;
cq->is_dying = false;
tasklet_setup(&cq->comp_task, rxe_send_complete);
spin_lock_init(&cq->cq_lock);
cq->ibcq.cqe = cqe;
return 0;
}
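
/* Resize the underlying queue buffer and update the advertised CQ depth. */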
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
struct rxe_resize_cq_resp __user *uresp,
struct ib_udata *udata)
{
int err;
err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
sizeof(struct rxe_cqe), udata,
uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
if (!err)
cq->ibcq.cqe = cqe;
return err;
}
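
/*
 * Post one completion entry. If the queue is full, report
 * IB_EVENT_CQ_ERR to the event handler and return -EBUSY; otherwise
 * copy the CQE in, advance the producer index and, if the CQ is armed
 * for notification, schedule the completion tasklet.
 */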
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
struct ib_event ev;
unsigned long flags;
int full;
void *addr;
spin_lock_irqsave(&cq->cq_lock, flags);
if (cq->is_user)
full = queue_full(cq->queue, QUEUE_TYPE_TO_USER);
else
full = queue_full(cq->queue, QUEUE_TYPE_KERNEL);
if (unlikely(full)) {
spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
ev.element.cq = &cq->ibcq;
ev.event = IB_EVENT_CQ_ERR;
cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
}
return -EBUSY;
}
if (cq->is_user)
addr = producer_addr(cq->queue, QUEUE_TYPE_TO_USER);
else
addr = producer_addr(cq->queue, QUEUE_TYPE_KERNEL);
memcpy(addr, cqe, sizeof(*cqe));
if (cq->is_user)
advance_producer(cq->queue, QUEUE_TYPE_TO_USER);
else
advance_producer(cq->queue, QUEUE_TYPE_KERNEL);
spin_unlock_irqrestore(&cq->cq_lock, flags);
if ((cq->notify == IB_CQ_NEXT_COMP) ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
cq->notify = 0;
tasklet_schedule(&cq->comp_task);
}
return 0;
}
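
/* Mark the CQ as dying so rxe_send_complete() stops invoking the handler. */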
void rxe_cq_disable(struct rxe_cq *cq)
{
unsigned long flags;
spin_lock_irqsave(&cq->cq_lock, flags);
cq->is_dying = true;
spin_unlock_irqrestore(&cq->cq_lock, flags);
}
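
/* Pool cleanup callback: free the queue when the CQ object is destroyed. */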
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);
if (cq->queue)
rxe_queue_cleanup(cq->queue);
}