Release 4.7 drivers/infiniband/ulp/iser/iser_verbs.c
  
  
/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "iscsi_iser.h"
#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
                                 ISCSI_ISER_MAX_CONN)
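/*
 * Worked example of the CQ sizing above (a sketch; the real values of
 * ISER_QP_MAX_RECV_DTOS and ISER_QP_MAX_REQ_DTOS live in iscsi_iser.h).
 * Assuming, purely for illustration, both were 512:
 *
 *   ISER_MAX_RX_LEN = 512 * 8 = 4096
 *   ISER_MAX_TX_LEN = 512 * 8 = 4096
 *   ISER_MAX_CQ_LEN = 4096 + 4096 + 8 = 8200
 *
 * i.e. each CQ is sized to absorb every RX and TX completion of all
 * eight connections, plus one extra slot per connection.
 */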
static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("qp event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 23 | 74.19% | 1 | 50.00% |
| sagi grimberg | 8 | 25.81% | 1 | 50.00% |
| Total | 31 | 100.00% | 2 | 100.00% |
static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %s (%d) on device %s port %d\n",
		 ib_event_msg(event->event), event->event,
		 event->device->name, event->element.port_num);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 36 | 81.82% | 1 | 50.00% |
| sagi grimberg | 8 | 18.18% | 1 | 50.00% |
| Total | 44 | 100.00% | 2 | 100.00% |
/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	int ret, i, max_cqe;
	ret = iser_assign_reg_ops(device);
	if (ret)
		return ret;
	device->comps_used = min_t(int, num_online_cpus(),
				 ib_dev->num_comp_vectors);
	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
				GFP_KERNEL);
	if (!device->comps)
		goto comps_err;
	max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);
	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
		  device->comps_used, ib_dev->name,
		  ib_dev->num_comp_vectors, max_cqe);
	device->pd = ib_alloc_pd(ib_dev);
	if (IS_ERR(device->pd))
		goto pd_err;
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];
		comp->cq = ib_alloc_cq(ib_dev, comp, max_cqe, i,
				       IB_POLL_SOFTIRQ);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
			goto cq_err;
		}
	}
	if (!iser_always_reg) {
		int access = IB_ACCESS_LOCAL_WRITE |
			     IB_ACCESS_REMOTE_WRITE |
			     IB_ACCESS_REMOTE_READ;
		device->mr = ib_get_dma_mr(device->pd, access);
		if (IS_ERR(device->mr))
			goto cq_err;
	}
	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
			      iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;
	return 0;
handler_err:
	if (device->mr)
		ib_dereg_mr(device->mr);
cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];
		if (comp->cq)
			ib_free_cq(comp->cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->comps);
comps_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 139 | 38.19% | 4 | 22.22% |
| sagi grimberg | 128 | 35.16% | 8 | 44.44% |
| alex tabachnik | 61 | 16.76% | 1 | 5.56% |
| minh duc tran | 15 | 4.12% | 1 | 5.56% |
| roi dayan | 13 | 3.57% | 2 | 11.11% |
| christoph hellwig | 7 | 1.92% | 1 | 5.56% |
| doug ledford | 1 | 0.27% | 1 | 5.56% |
| Total | 364 | 100.00% | 18 | 100.00% |
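/*
 * Minimal standalone sketch (not part of the original file) of the
 * PD + CQ setup/teardown pairing used by iser_create_device_ib_res()
 * and iser_free_device_ib_res(). Function and variable names here are
 * illustrative only.
 */
#if 0	/* illustrative sketch */
static int example_pd_cq_setup(struct ib_device *ib_dev)
{
	struct ib_pd *pd;
	struct ib_cq *cq;

	pd = ib_alloc_pd(ib_dev);		/* one PD per device */
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* one CQ per completion vector; vector 0, 16 entries here */
	cq = ib_alloc_cq(ib_dev, NULL, 16, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		ib_dealloc_pd(pd);
		return PTR_ERR(cq);
	}

	/* ... use pd/cq ...; teardown in reverse order: */
	ib_free_cq(cq);
	ib_dealloc_pd(pd);
	return 0;
}
#endif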
/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];
		ib_free_cq(comp->cq);
		comp->cq = NULL;
	}
	(void)ib_unregister_event_handler(&device->event_handler);
	if (device->mr)
		(void)ib_dereg_mr(device->mr);
	ib_dealloc_pd(device->pd);
	kfree(device->comps);
	device->comps = NULL;
	device->mr = NULL;
	device->pd = NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 52 | 44.83% | 3 | 33.33% |
| alex tabachnik | 32 | 27.59% | 1 | 11.11% |
| sagi grimberg | 31 | 26.72% | 4 | 44.44% |
| christoph hellwig | 1 | 0.86% | 1 | 11.11% |
| Total | 116 | 100.00% | 9 | 100.00% |
/**
 * iser_alloc_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
			unsigned cmds_max,
			unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_page_vec *page_vec;
	struct iser_fr_desc *desc;
	struct ib_fmr_pool *fmr_pool;
	struct ib_fmr_pool_param params;
	int ret;
	INIT_LIST_HEAD(&fr_pool->list);
	spin_lock_init(&fr_pool->lock);
	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
			   GFP_KERNEL);
	if (!page_vec) {
		ret = -ENOMEM;
		goto err_frpl;
	}
	page_vec->pages = (u64 *)(page_vec + 1);
	params.page_shift        = SHIFT_4K;
	params.max_pages_per_fmr = size;
	/* make the pool size twice the max number of SCSI commands *
         * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);
	fmr_pool = ib_create_fmr_pool(device->pd, &params);
	if (IS_ERR(fmr_pool)) {
		ret = PTR_ERR(fmr_pool);
		iser_err("FMR allocation failed, err %d\n", ret);
		goto err_fmr;
	}
	desc->rsc.page_vec = page_vec;
	desc->rsc.fmr_pool = fmr_pool;
	list_add(&desc->list, &fr_pool->list);
	return 0;
err_fmr:
	kfree(page_vec);
err_frpl:
	kfree(desc);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 117 | 41.05% | 3 | 20.00% |
| sagi grimberg | 79 | 27.72% | 7 | 46.67% |
| adir lev | 71 | 24.91% | 1 | 6.67% |
| shlomo pongratz | 15 | 5.26% | 2 | 13.33% |
| alex tabachnik | 2 | 0.70% | 1 | 6.67% |
| erez zilber | 1 | 0.35% | 1 | 6.67% |
| Total | 285 | 100.00% | 15 | 100.00% |
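/*
 * Sketch (not in the original file) of how the pool built above is
 * consumed at map time; the real consumer lives in iser_memory.c.
 * Names below are illustrative.
 */
#if 0	/* illustrative sketch */
static int example_fmr_map(struct ib_fmr_pool *pool,
			   u64 *pages, int npages, u64 io_addr)
{
	struct ib_pool_fmr *fmr;

	fmr = ib_fmr_pool_map_phys(pool, pages, npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	/* ... post WRs using fmr->fmr->rkey ... */

	/* unmap is deferred; the pool flushes at the dirty watermark */
	return ib_fmr_pool_unmap(fmr);
}
#endif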
/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	desc = list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
	list_del(&desc->list);
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, desc->rsc.fmr_pool);
	ib_destroy_fmr_pool(desc->rsc.fmr_pool);
	kfree(desc->rsc.page_vec);
	kfree(desc);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| sagi grimberg | 44 | 52.38% | 3 | 75.00% |
| adir lev | 40 | 47.62% | 1 | 25.00% |
| Total | 84 | 100.00% | 4 | 100.00% |
static int
iser_alloc_reg_res(struct iser_device *device,
		   struct ib_pd *pd,
		   struct iser_reg_resources *res,
		   unsigned int size)
{
	struct ib_device *ib_dev = device->ib_device;
	enum ib_mr_type mr_type;
	int ret;
	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;
	res->mr = ib_alloc_mr(pd, mr_type, size);
	if (IS_ERR(res->mr)) {
		ret = PTR_ERR(res->mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		return ret;
	}
	res->mr_valid = 0;
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| sagi grimberg | 111 | 99.11% | 5 | 83.33% |
| jenny derzhavetz | 1 | 0.89% | 1 | 16.67% |
| Total | 112 | 100.00% | 6 | 100.00% |
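/*
 * Sketch (not in the original file): the MR allocated above is later
 * driven through ib_map_mr_sg() before posting an IB_WR_REG_MR work
 * request; IB_MR_TYPE_SG_GAPS merely relaxes the page-alignment rules
 * that mapping imposes. Signature as of this kernel generation; names
 * are illustrative.
 */
#if 0	/* illustrative sketch */
static int example_map_mr(struct ib_mr *mr, struct scatterlist *sg,
			  int sg_nents)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, SIZE_4K);
	if (n < 0)
		return n;
	if (n < sg_nents)
		return -EINVAL;	/* couldn't map the whole list */

	/* ... build and post an IB_WR_REG_MR using mr->rkey ... */
	return 0;
}
#endif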
static void
iser_free_reg_res(struct iser_reg_resources *rsc)
{
	ib_dereg_mr(rsc->mr);
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| sagi grimberg | sagi grimberg | 18 | 100.00% | 1 | 100.00% | 
 | Total | 18 | 100.00% | 1 | 100.00% | 
static int
iser_alloc_pi_ctx(struct iser_device *device,
		  struct ib_pd *pd,
		  struct iser_fr_desc *desc,
		  unsigned int size)
{
	struct iser_pi_context *pi_ctx = NULL;
	int ret;
	desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!desc->pi_ctx)
		return -ENOMEM;
	pi_ctx = desc->pi_ctx;
	ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
	if (ret) {
		iser_err("failed to allocate reg_resources\n");
		goto alloc_reg_res_err;
	}
	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto sig_mr_failure;
	}
	pi_ctx->sig_mr_valid = 0;
	desc->pi_ctx->sig_protected = 0;
	return 0;
sig_mr_failure:
	iser_free_reg_res(&pi_ctx->rsc);
alloc_reg_res_err:
	kfree(desc->pi_ctx);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex tabachnik | 94 | 54.02% | 1 | 11.11% |
| sagi grimberg | 79 | 45.40% | 7 | 77.78% |
| jenny derzhavetz | 1 | 0.57% | 1 | 11.11% |
| Total | 174 | 100.00% | 9 | 100.00% |
static void
iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
{
	iser_free_reg_res(&pi_ctx->rsc);
	ib_dereg_mr(pi_ctx->sig_mr);
	kfree(pi_ctx);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| sagi grimberg | 31 | 100.00% | 3 | 100.00% |
| Total | 31 | 100.00% | 3 | 100.00% |
static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
			 struct ib_pd *pd,
			 bool pi_enable,
			 unsigned int size)
{
	struct iser_fr_desc *desc;
	int ret;
	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);
	ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
	if (ret)
		goto reg_res_alloc_failure;
	if (pi_enable) {
		ret = iser_alloc_pi_ctx(device, pd, desc, size);
		if (ret)
			goto pi_ctx_alloc_failure;
	}
	return desc;
pi_ctx_alloc_failure:
	iser_free_reg_res(&desc->rsc);
reg_res_alloc_failure:
	kfree(desc);
	return ERR_PTR(ret);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| sagi grimberg | 131 | 98.50% | 7 | 87.50% |
| alex tabachnik | 2 | 1.50% | 1 | 12.50% |
| Total | 133 | 100.00% | 8 | 100.00% |
/**
 * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	int i, ret;
	INIT_LIST_HEAD(&fr_pool->list);
	spin_lock_init(&fr_pool->lock);
	fr_pool->size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = iser_create_fastreg_desc(device, device->pd,
						ib_conn->pi_support, size);
		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto err;
		}
		list_add_tail(&desc->list, &fr_pool->list);
		fr_pool->size++;
	}
	return 0;
err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| sagi grimberg | 132 | 88.00% | 9 | 64.29% |
| shlomo pongratz | 9 | 6.00% | 1 | 7.14% |
| adir lev | 4 | 2.67% | 1 | 7.14% |
| or gerlitz | 3 | 2.00% | 2 | 14.29% |
| alex tabachnik | 2 | 1.33% | 1 | 7.14% |
| Total | 150 | 100.00% | 14 | 100.00% |
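/*
 * Sketch (not in the original file) of checking a descriptor in and
 * out of the pool built above; the real helpers live in iser_memory.c.
 * The pool spinlock is what makes this safe against the completion
 * path. Names below are illustrative.
 */
#if 0	/* illustrative sketch */
static struct iser_fr_desc *example_desc_get(struct iser_fr_pool *fr_pool)
{
	struct iser_fr_desc *desc;

	spin_lock_bh(&fr_pool->lock);
	desc = list_first_entry(&fr_pool->list, struct iser_fr_desc, list);
	list_del(&desc->list);
	spin_unlock_bh(&fr_pool->lock);

	return desc;
}

static void example_desc_put(struct iser_fr_pool *fr_pool,
			     struct iser_fr_desc *desc)
{
	spin_lock_bh(&fr_pool->lock);
	list_add(&desc->list, &fr_pool->list);
	spin_unlock_bh(&fr_pool->lock);
}
#endif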
/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc, *tmp;
	int i = 0;
	if (list_empty(&fr_pool->list))
		return;
	iser_info("freeing conn %p fr pool\n", ib_conn);
	list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
		list_del(&desc->list);
		iser_free_reg_res(&desc->rsc);
		if (desc->pi_ctx)
			iser_free_pi_ctx(desc->pi_ctx);
		kfree(desc);
		++i;
	}
	if (i < fr_pool->size)
		iser_warn("pool still has %d regions registered\n",
			  fr_pool->size - i);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| sagi grimberg | 82 | 68.91% | 7 | 53.85% |
| shlomo pongratz | 14 | 11.76% | 1 | 7.69% |
| alex tabachnik | 12 | 10.08% | 1 | 7.69% |
| or gerlitz | 6 | 5.04% | 2 | 15.38% |
| adir lev | 4 | 3.36% | 1 | 7.69% |
| dan carpenter | 1 | 0.84% | 1 | 7.69% |
| Total | 119 | 100.00% | 13 | 100.00% |
/**
 * iser_create_ib_conn_res - creates a Queue-Pair (QP)
 *
 * returns 0 on success, or errno code on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_device	*device;
	struct ib_device	*ib_dev;
	struct ib_qp_init_attr	init_attr;
	int			ret = -ENOMEM;
	int index, min_index = 0;
	BUG_ON(ib_conn->device == NULL);
	device = ib_conn->device;
	ib_dev = device->ib_device;
	memset(&init_attr, 0, sizeof init_attr);
	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= ib_conn->comp->cq;
	init_attr.recv_cq	= ib_conn->comp->cq;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
		iser_conn->max_cmds =
			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
	} else {
		if (ib_dev->attrs.max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
			init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS + 1;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
		} else {
			init_attr.cap.max_send_wr = ib_dev->attrs.max_qp_wr;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr);
			iser_dbg("device %s supports max_send_wr %d\n",
				 device->ib_device->name, ib_dev->attrs.max_qp_wr);
		}
	}
	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;
	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;
out_err:
	mutex_lock(&ig.connlist_mutex);
	ib_conn->comp->active_qps--;
	mutex_unlock(&ig.connlist_mutex);
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 146 | 33.33% | 4 | 25.00% |
| alex tabachnik | 93 | 21.23% | 2 | 12.50% |
| minh duc tran | 79 | 18.04% | 1 | 6.25% |
| sagi grimberg | 72 | 16.44% | 6 | 37.50% |
| shlomo pongratz | 44 | 10.05% | 1 | 6.25% |
| roi dayan | 2 | 0.46% | 1 | 6.25% |
| dan carpenter | 2 | 0.46% | 1 | 6.25% |
| Total | 438 | 100.00% | 16 | 100.00% |
/**
 * based on the resolved device node GUID, see if there is already an
 * allocated device for this device node. If there is no such device, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	mutex_lock(&ig.device_list_mutex);
	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;
	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;
	/* assign the IB device to the newly allocated iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);
inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 126 | 92.65% | 1 | 33.33% |
| arne redlich | 10 | 7.35% | 2 | 66.67% |
| Total | 136 | 100.00% | 3 | 100.00% |
/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 69 | 98.57% | 1 | 50.00% |
| roi dayan | 1 | 1.43% | 1 | 50.00% |
| Total | 70 | 100.00% | 2 | 100.00% |
/**
 * Called with state mutex held
 **/
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;
	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 36 | 80.00% | 1 | 50.00% |
| sagi grimberg | 9 | 20.00% | 1 | 50.00% |
| Total | 45 | 100.00% | 2 | 100.00% |
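/*
 * Sketch (not in the original file): typical use of the helper above,
 * mirroring iser_conn_terminate() below -- flip UP -> TERMINATING
 * exactly once, under the state mutex. Names are illustrative.
 */
#if 0	/* illustrative sketch */
static void example_terminate(struct iser_conn *iser_conn)
{
	mutex_lock(&iser_conn->state_mutex);
	if (iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				      ISER_CONN_TERMINATING)) {
		/* sole caller that observed UP; run the disconnect path */
	}
	mutex_unlock(&iser_conn->state_mutex);
}
#endif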
void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;
	iser_conn = container_of(work, struct iser_conn, release_work);
	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);
	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| ariel nahum | 58 | 80.56% | 3 | 50.00% |
| sagi grimberg | 14 | 19.44% | 3 | 50.00% |
| Total | 72 | 100.00% | 6 | 100.00% |
/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *     iser device and memory regions pool (only iscsi
 *     shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);
	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}
	if (destroy) {
		if (iser_conn->rx_descs)
			iser_free_rx_descriptors(iser_conn);
		if (device != NULL) {
			iser_device_try_release(device);
			ib_conn->device = NULL;
		}
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| sagi grimberg | 99 | 86.84% | 6 | 85.71% |
| roland dreier | 15 | 13.16% | 1 | 14.29% |
| Total | 114 | 100.00% | 7 | 100.00% |
/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);
	mutex_lock(&iser_conn->state_mutex);
	/* In case we end up here without ep_disconnect being invoked. */
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iscsi_destroy_endpoint(iser_conn->ep);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
         * In case we never got to bind stage, we still need to
         * release IB resources (which is safe to call more than once).
         */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);
	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}
	kfree(iser_conn);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| ariel nahum | 49 | 37.40% | 5 | 45.45% |
| sagi grimberg | 31 | 23.66% | 4 | 36.36% |
| roland dreier | 31 | 23.66% | 1 | 9.09% |
| roi dayan | 20 | 15.27% | 1 | 9.09% |
| Total | 131 | 100.00% | 11 | 100.00% |
/**
 * triggers start of the disconnect procedures and waits for them to be done
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;
	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;
	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);
	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);
	/*
         * In case we didn't already clean up the cma_id (peer initiated
         * a disconnection), we need to cause the CMA to change the QP
         * state to ERROR.
         */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);
		/* block until all flush errors are consumed */
		ib_drain_sq(ib_conn->qp);
	}
	return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| sagi grimberg | 65 | 60.19% | 4 | 66.67% |
| or gerlitz | 41 | 37.96% | 1 | 16.67% |
| steve wise | 2 | 1.85% | 1 | 16.67% |
| Total | 108 | 100.00% | 6 | 100.00% |
/**
 * Called with state mutex held
 **/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;
	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_TERMINATING;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 28 | 84.85% | 1 | 25.00% |
| sagi grimberg | 4 | 12.12% | 2 | 50.00% |
| ariel nahum | 1 | 3.03% | 1 | 25.00% |
| Total | 33 | 100.00% | 4 | 100.00% |
static void
iser_calc_scsi_params(struct iser_conn *iser_conn,
		      unsigned int max_sectors)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	unsigned short sg_tablesize, sup_sg_tablesize;
	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
	sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
				 device->ib_device->attrs.max_fast_reg_page_list_len);
	if (sg_tablesize > sup_sg_tablesize) {
		sg_tablesize = sup_sg_tablesize;
		iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
	} else {
		iser_conn->scsi_max_sectors = max_sectors;
	}
	iser_conn->scsi_sg_tablesize = sg_tablesize;
	iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
		 iser_conn, iser_conn->scsi_sg_tablesize,
		 iser_conn->scsi_max_sectors);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| sagi grimberg | 109 | 97.32% | 1 | 50.00% |
| or gerlitz | 3 | 2.68% | 1 | 50.00% |
| Total | 112 | 100.00% | 2 | 100.00% |
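/*
 * Worked example for the clamping above (illustrative numbers only):
 * with max_sectors = 1024, sg_tablesize = DIV_ROUND_UP(1024 * 512, 4096)
 * = 128 pages. If the device capped max_fast_reg_page_list_len at, say,
 * 64, sg_tablesize would be clamped to 64 and scsi_max_sectors would
 * shrink to 64 * 4096 / 512 = 512 sectors.
 */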
/**
 * Called with state mutex held
 **/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *iser_conn;
	struct ib_conn   *ib_conn;
	int    ret;
	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;
	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}
	ib_conn->device = device;
	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->ib_device->attrs.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}
	iser_calc_scsi_params(iser_conn, iser_max_sectors);
	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 63 | 35.80% | 2 | 22.22% |
| alex tabachnik | 47 | 26.70% | 1 | 11.11% |
| sagi grimberg | 26 | 14.77% | 3 | 33.33% |
| ariel nahum | 24 | 13.64% | 2 | 22.22% |
| arne redlich | 16 | 9.09% | 1 | 11.11% |
| Total | 176 | 100.00% | 9 | 100.00% |
/**
 * Called with state mutex held
 **/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;
	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;
	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = ISER_ZBVA_NOT_SUP;
	if (!device->remote_inv_sup)
		req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
	conn_param.private_data	= (void *)&req_hdr;
	conn_param.private_data_len = sizeof(struct iser_cm_hdr);
	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}
	return;
failure:
	iser_connect_error(cma_id);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 142 | 68.60% | 3 | 30.00% |
| sagi grimberg | 41 | 19.81% | 4 | 40.00% |
| jenny derzhavetz | 12 | 5.80% | 1 | 10.00% |
| ariel nahum | 12 | 5.80% | 2 | 20.00% |
| Total | 207 | 100.00% | 10 | 100.00% |
static void iser_connected_handler(struct rdma_cm_id *cma_id,
				   const void *private_data)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;
	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;
	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
	if (private_data) {
		u8 flags = *(u8 *)private_data;
		iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP);
	}
	iser_info("conn %p: negotiated %s invalidation\n",
		  iser_conn, iser_conn->snd_w_inv ? "remote" : "local");
	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 64 | 45.71% | 2 | 33.33% |
| jenny derzhavetz | 47 | 33.57% | 1 | 16.67% |
| ariel nahum | 24 | 17.14% | 2 | 33.33% |
| sagi grimberg | 5 | 3.57% | 1 | 16.67% |
| Total | 140 | 100.00% | 6 | 100.00% |
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 33 | 60.00% | 1 | 14.29% |
| roi dayan | 12 | 21.82% | 1 | 14.29% |
| sagi grimberg | 8 | 14.55% | 3 | 42.86% |
| ariel nahum | 2 | 3.64% | 2 | 28.57% |
| Total | 55 | 100.00% | 7 | 100.00% |
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	/*
         * We are not guaranteed that we visited disconnected_handler
         * by now, call it here to be safe that we handle CM drep
         * and flush errors.
         */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy);
	complete(&iser_conn->ib_completion);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| sagi grimberg | 39 | 79.59% | 3 | 60.00% |
| or gerlitz | 9 | 18.37% | 1 | 20.00% |
| ariel nahum | 1 | 2.04% | 1 | 20.00% |
| Total | 49 | 100.00% | 5 | 100.00% |
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;
	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("%s (%d): status %d conn %p id %p\n",
		  rdma_event_msg(event->event), event->event,
		  event->status, cma_id->context, cma_id);
	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id, event->param.conn.private_data);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
                 * we *must* destroy the device as we cannot rely
                 * on iscsid to be around to initiate error handling.
                 * also if we are not in state DOWN implicitly destroy
                 * the cma_id.
                 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event: %s (%d)\n",
			 rdma_event_msg(event->event), event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);
	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 101 | 46.98% | 3 | 27.27% |
| sagi grimberg | 52 | 24.19% | 3 | 27.27% |
| ariel nahum | 45 | 20.93% | 2 | 18.18% |
| erez zilber | 8 | 3.72% | 1 | 9.09% |
| jenny derzhavetz | 8 | 3.72% | 1 | 9.09% |
| roi dayan | 1 | 0.47% | 1 | 9.09% |
| Total | 215 | 100.00% | 11 | 100.00% |
void iser_conn_init(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	iser_conn->state = ISER_CONN_INIT;
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	mutex_init(&iser_conn->state_mutex);
	ib_conn->post_recv_buf_count = 0;
	ib_conn->reg_cqe.done = iser_reg_comp;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph hellwig | 24 | 30.00% | 1 | 9.09% |
| ariel nahum | 21 | 26.25% | 3 | 27.27% |
| sagi grimberg | 15 | 18.75% | 4 | 36.36% |
| or gerlitz | 14 | 17.50% | 1 | 9.09% |
| mike christie | 6 | 7.50% | 2 | 18.18% |
| Total | 80 | 100.00% | 11 | 100.00% |
/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *iser_conn,
		 struct sockaddr    *src_addr,
		 struct sockaddr    *dst_addr,
		 int                 non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;
	mutex_lock(&iser_conn->state_mutex);
	sprintf(iser_conn->name, "%pISp", dst_addr);
	iser_info("connecting to: %s\n", iser_conn->name);
	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;
	iser_conn->state = ISER_CONN_PENDING;
	ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}
	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}
	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);
		if (iser_conn->state != ISER_CONN_UP) {
			err =  -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);
	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;
id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 199 | 73.98% | 1 | 11.11% |
| sagi grimberg | 29 | 10.78% | 2 | 22.22% |
| ariel nahum | 25 | 9.29% | 3 | 33.33% |
| roi dayan | 11 | 4.09% | 1 | 11.11% |
| guy shapiro | 3 | 1.12% | 1 | 11.11% |
| sean hefty | 2 | 0.74% | 1 | 11.11% |
| Total | 269 | 100.00% | 9 | 100.00% |
int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_login_desc *desc = &iser_conn->login_desc;
	struct ib_recv_wr wr, *wr_failed;
	int ib_ret;
	desc->sge.addr = desc->rsp_dma;
	desc->sge.length = ISER_RX_LOGIN_SIZE;
	desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;
	desc->cqe.done = iser_login_rsp;
	wr.wr_cqe = &desc->cqe;
	wr.sg_list = &desc->sge;
	wr.num_sge = 1;
	wr.next = NULL;
	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &wr, &wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 93 | 61.59% | 3 | 37.50% |
| sagi grimberg | 34 | 22.52% | 3 | 37.50% |
| christoph hellwig | 22 | 14.57% | 1 | 12.50% |
| jason gunthorpe | 2 | 1.32% | 1 | 12.50% |
| Total | 151 | 100.00% | 8 | 100.00% |
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;
	struct ib_recv_wr *wr, *wr_failed;
	int i, ib_ret;
	for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) {
		rx_desc = &iser_conn->rx_descs[my_rx_head];
		rx_desc->cqe.done = iser_task_rsp;
		wr->wr_cqe = &rx_desc->cqe;
		wr->sg_list = &rx_desc->rx_sg;
		wr->num_sge = 1;
		wr->next = wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}
	wr--;
	wr->next = NULL; /* mark end of work requests list */
	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 140 | 71.43% | 3 | 42.86% |
| christoph hellwig | 35 | 17.86% | 1 | 14.29% |
| sagi grimberg | 19 | 9.69% | 2 | 28.57% |
| shlomo pongratz | 2 | 1.02% | 1 | 14.29% |
| Total | 196 | 100.00% | 7 | 100.00% |
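/*
 * Note on the ring arithmetic above (a sketch, not from the original
 * file): qp_max_recv_dtos_mask is the RX ring size minus one, so the
 * '&' acts as a cheap modulo. That only works because the ring size is
 * a power of two, e.g. with 512 descriptors, mask = 0x1ff and
 * (511 + 1) & 0x1ff == 0, wrapping back to the first descriptor.
 */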
/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, or errno code on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
{
	struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
	int ib_ret;
	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);
	wr->next = NULL;
	wr->wr_cqe = &tx_desc->cqe;
	wr->sg_list = tx_desc->tx_sg;
	wr->num_sge = tx_desc->num_sge;
	wr->opcode = IB_WR_SEND;
	wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
	ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr);
	if (ib_ret)
		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
			 ib_ret, bad_wr->opcode);
	return ib_ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 86 | 62.32% | 2 | 28.57% |
| sagi grimberg | 46 | 33.33% | 3 | 42.86% |
| christoph hellwig | 6 | 4.35% | 2 | 28.57% |
| Total | 138 | 100.00% | 7 | 100.00% |
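/*
 * Note on the 'signal' flag above (a sketch of the rationale, not from
 * the original file): an unsignaled send still occupies a send-queue
 * slot until some later *signaled* send on the same QP completes, so a
 * sender must request a signaled completion periodically or the SQ
 * eventually fills. The batching policy lives in the callers, outside
 * this file.
 */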
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct iser_fr_desc *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;
	if (desc && desc->pi_ctx->sig_protected) {
		desc->pi_ctx->sig_protected = 0;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}
		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;
			sector_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;
			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);
			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}
	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| sagi grimberg | 134 | 60.91% | 6 | 54.55% |
| christoph hellwig | 71 | 32.27% | 1 | 9.09% |
| or gerlitz | 8 | 3.64% | 2 | 18.18% |
| randy dunlap | 6 | 2.73% | 1 | 9.09% |
| arnd bergmann | 1 | 0.45% | 1 | 9.09% |
| Total | 220 | 100.00% | 11 | 100.00% |
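/*
 * Worked example for the sector math above (illustrative numbers):
 * with 512-byte sectors and DIF, each sector carries 512 data bytes
 * plus an 8-byte protection tuple, i.e. 520 bytes on the wire.
 * sector_div() of sig_err_offset by (sector_size + 8) therefore turns
 * the failing byte offset into a sector index, which is then rebased
 * onto the command's starting LBA via scsi_get_lba().
 */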
void iser_err_comp(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR) {
		struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);
		iser_err("%s failure: %s (%d) vend_err %x\n", type,
			 ib_wc_status_msg(wc->status), wc->status,
			 wc->vendor_err);
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
	} else {
		iser_dbg("%s failure: %s (%d)\n", type,
			 ib_wc_status_msg(wc->status), wc->status);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph hellwig | 97 | 100.00% | 1 | 100.00% |
| Total | 97 | 100.00% | 1 | 100.00% |
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| or gerlitz | 1804 | 36.34% | 13 | 13.40% |
| sagi grimberg | 1747 | 35.19% | 42 | 43.30% |
| alex tabachnik | 345 | 6.95% | 3 | 3.09% |
| ariel nahum | 266 | 5.36% | 8 | 8.25% |
| christoph hellwig | 263 | 5.30% | 2 | 2.06% |
| adir lev | 119 | 2.40% | 1 | 1.03% |
| minh duc tran | 94 | 1.89% | 1 | 1.03% |
| shlomo pongratz | 85 | 1.71% | 2 | 2.06% |
| jenny derzhavetz | 69 | 1.39% | 2 | 2.06% |
| roi dayan | 60 | 1.21% | 5 | 5.15% |
| roland dreier | 46 | 0.93% | 1 | 1.03% |
| arne redlich | 26 | 0.52% | 2 | 2.06% |
| erez zilber | 9 | 0.18% | 2 | 2.06% |
| randy dunlap | 6 | 0.12% | 1 | 1.03% |
| mike christie | 6 | 0.12% | 2 | 2.06% |
| dan carpenter | 3 | 0.06% | 1 | 1.03% |
| guy shapiro | 3 | 0.06% | 1 | 1.03% |
| tejun heo | 3 | 0.06% | 1 | 1.03% |
| sean hefty | 2 | 0.04% | 1 | 1.03% |
| steve wise | 2 | 0.04% | 1 | 1.03% |
| jason gunthorpe | 2 | 0.04% | 1 | 1.03% |
| arnd bergmann | 1 | 0.02% | 1 | 1.03% |
| thadeu lima de souza cascardo | 1 | 0.02% | 1 | 1.03% |
| doug ledford | 1 | 0.02% | 1 | 1.03% |
| oliver pinter | 1 | 0.02% | 1 | 1.03% |
| Total | 4964 | 100.00% | 97 | 100.00% |