Release 4.14 net/rds/ib_cm.c
/*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>
#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
/*
* Set the selected protocol version
*/
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
conn->c_version = version;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Grover | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
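For reference, conn->c_version is a packed major/minor word. A minimal userspace sketch of the encoding, assuming the RDS_PROTOCOL* macros keep their usual net/rds/rds.h shape (major in the high byte, minor in the low byte):

#include <stdio.h>

/* Assumed to match net/rds/rds.h: major in the high byte, minor low. */
#define RDS_PROTOCOL_MAJOR(v)   ((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)   ((v) & 255)
#define RDS_PROTOCOL(maj, min)  (((maj) << 8) | (min))

int main(void)
{
        unsigned int v = RDS_PROTOCOL(3, 1);

        /* Prints "0x0301 -> 3.1" */
        printf("0x%04x -> %u.%u\n", v,
               RDS_PROTOCOL_MAJOR(v), RDS_PROTOCOL_MINOR(v));
        return 0;
}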
/*
* Set up flow control
*/
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
struct rds_ib_connection *ic = conn->c_transport_data;
if (rds_ib_sysctl_flow_control && credits != 0) {
/* We're doing flow control */
ic->i_flowctl = 1;
rds_ib_send_add_credits(conn, credits);
} else {
ic->i_flowctl = 0;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Grover | 56 | 100.00% | 1 | 100.00% |
Total | 56 | 100.00% | 1 | 100.00% |
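rds_ib_send_add_credits() feeds into a single atomic credits word on the connection. A runnable sketch of the assumed packing, per the IB_*_CREDITS helpers in net/rds/ib.h (send credits in the low 16 bits, newly posted receive-buffer credits in the high 16 bits):

#include <stdio.h>

/* Assumed packing of ic->i_credits, per the IB_*_CREDITS helpers in
 * net/rds/ib.h: low 16 bits carry send credits, high 16 bits carry
 * newly posted receive-buffer credits. */
#define IB_SET_SEND_CREDITS(v)  ((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)  ((v) << 16)
#define IB_GET_SEND_CREDITS(v)  ((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)  ((v) >> 16)

int main(void)
{
        unsigned int credits = IB_SET_SEND_CREDITS(16) | IB_SET_POST_CREDITS(8);

        /* Prints "send=16 post=8" */
        printf("send=%u post=%u\n", IB_GET_SEND_CREDITS(credits),
               IB_GET_POST_CREDITS(credits));
        return 0;
}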
/*
* Tune RNR behavior. Without flow control, we use a rather
* low timeout, but not the absolute minimum - this should
* be tunable.
*
* We already set the RNR retry count to 7 (which is the
* smallest infinite number :-) above.
* If flow control is off, we want to change this back to 0
* so that we learn quickly when our credit accounting is
* buggy.
*
* Caller passes in a qp_attr pointer - don't waste stack space
* by allocating this twice.
*/
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
int ret;
attr->min_rnr_timer = IB_RNR_TIMER_000_32;
ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
if (ret)
printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Grover | 53 | 100.00% | 1 | 100.00% |
Total | 53 | 100.00% | 1 | 100.00% |
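The qp_attr argument exists so the caller can reuse one stack allocation across several ib_modify_qp() calls, which is exactly what rds_ib_cm_connect_complete() does below. A condensed view of that call site:

/* Condensed from rds_ib_cm_connect_complete() below: one ib_qp_attr on
 * the caller's stack serves both the RNR tweak and the move to RTS. */
struct ib_qp_attr qp_attr;

rds_ib_tune_rnr(ic, &qp_attr);          /* sets min_rnr_timer via ib_modify_qp */
qp_attr.qp_state = IB_QPS_RTS;          /* then reuse the attr for the state change */
err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);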
/*
* Connection established.
* We get here for both outgoing and incoming connections.
*/
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
const struct rds_ib_connect_private *dp = NULL;
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_qp_attr qp_attr;
int err;
if (event->param.conn.private_data_len >= sizeof(*dp)) {
dp = event->param.conn.private_data;
/* make sure it isn't empty data */
if (dp->dp_protocol_major) {
rds_ib_set_protocol(conn,
RDS_PROTOCOL(dp->dp_protocol_major,
dp->dp_protocol_minor));
rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
}
}
if (conn->c_version < RDS_PROTOCOL(3, 1)) {
pr_notice("RDS/IB: Connection <%pI4,%pI4> version %u.%u no longer supported\n",
&conn->c_laddr, &conn->c_faddr,
RDS_PROTOCOL_MAJOR(conn->c_version),
RDS_PROTOCOL_MINOR(conn->c_version));
rds_conn_destroy(conn);
return;
} else {
pr_notice("RDS/IB: %s conn connected <%pI4,%pI4> version %u.%u%s\n",
ic->i_active_side ? "Active" : "Passive",
&conn->c_laddr, &conn->c_faddr,
RDS_PROTOCOL_MAJOR(conn->c_version),
RDS_PROTOCOL_MINOR(conn->c_version),
ic->i_flowctl ? ", flow control" : "");
}
atomic_set(&ic->i_cq_quiesce, 0);
/* Init rings and fill recv. this needs to wait until protocol
* negotiation is complete, since ring layout is different
* from 3.1 to 4.1.
*/
rds_ib_send_init_ring(ic);
rds_ib_recv_init_ring(ic);
/* Post receive buffers - as a side effect, this will update
* the posted credit count. */
rds_ib_recv_refill(conn, 1, GFP_KERNEL);
/* Tune RNR behavior */
rds_ib_tune_rnr(ic, &qp_attr);
qp_attr.qp_state = IB_QPS_RTS;
err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
if (err)
printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);
/* update ib_device with this local ipaddr */
err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
if (err)
printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
err);
/* If the peer gave us the last packet it saw, process this as if
* we had received a regular ACK. */
if (dp) {
/* The dp structure start is not guaranteed to be 8-byte aligned.
* Since dp_ack_seq is 64-bit, the compiler may emit extended load
* operations for it, so go through get_unaligned to avoid faults
* on unaligned access.
*/
__be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);
if (dp_ack_seq)
rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
NULL);
}
rds_connect_complete(conn);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Grover | 289 | 84.50% | 6 | 46.15% |
Santosh Shilimkar | 35 | 10.23% | 4 | 30.77% |
shamir rabinovitch | 14 | 4.09% | 1 | 7.69% |
Zach Brown | 3 | 0.88% | 1 | 7.69% |
David Ahern | 1 | 0.29% | 1 | 7.69% |
Total | 342 | 100.00% | 13 | 100.00% |
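The get_unaligned() step above matters because the private-data buffer handed up by the RDMA CM carries no alignment guarantee, so a plain 64-bit load of dp_ack_seq could trap on strict-alignment architectures. A minimal sketch of the safe pattern; read_ack_seq() is a hypothetical helper, not part of this file:

#include <asm/unaligned.h>

/* Hypothetical helper showing the pattern used above: a byte-wise safe
 * load of a big-endian 64-bit field from a buffer whose alignment the
 * RDMA CM does not guarantee. */
static u64 read_ack_seq(const struct rds_ib_connect_private *dp)
{
        __be64 raw = get_unaligned(&dp->dp_ack_seq);  /* no 8-byte load assumed */

        return be64_to_cpu(raw);
}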
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
struct rdma_conn_param *conn_param,
struct rds_ib_connect_private *dp,
u32 protocol_version,
u32 max_responder_resources,
u32 max_initiator_depth)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
memset(conn_param, 0, sizeof(struct rdma_conn_param));
conn_param->responder_resources =
min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
conn_param->initiator_depth =
min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
conn_param->rnr_retry_count = 7;
if (dp) {
memset(dp, 0, sizeof(*dp));
dp->dp_saddr = conn->c_laddr;
dp->dp_daddr = conn->c_faddr;
dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
/* Advertise flow control */
if (ic->i_flowctl) {
unsigned int credits;
credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
dp->dp_credit = cpu_to_be32(credits);
atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
}
conn_param->private_data = dp;
conn_param->private_data_len = sizeof(*dp);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Grover | 241 | 97.18% | 3 | 60.00% |
Zach Brown | 4 | 1.61% | 1 | 20.00% |
Qing Huang | 3 | 1.21% | 1 | 20.00% |
Total | 248 | 100.00% | 5 | 100.00% |
static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
rdsdebug("event %u (%s) data %p\n",
event->event, ib_event_msg(event->event), data);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Grover | 25 | 75.76% | 1 | 33.33% |
Zach Brown | 7 | 21.21% | 1 | 33.33% |
Sagi Grimberg | 1 | 3.03% | 1 | 33.33% |
Total | 33 | 100.00% | 3 | 100.00% |
/* Plucking the oldest entry from the ring can be done concurrently with
* the thread refilling the ring. Each ring operation is protected by
* spinlocks and the transient state of refilling doesn't change the
* recording of which entry is oldest.
*
* This relies on IB only calling one cq comp_handler for each cq so that
* there will only be one caller of rds_recv_incoming() per RDS connection.
*/
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
struct rds_connection *conn = context;
struct rds_ib_connection *ic = conn->c_transport_data;
rdsdebug("conn %p cq %p\n", conn, cq);
rds_ib_stats_inc(s_ib_evt_handler_call);
tasklet_schedule(&ic->i_recv_tasklet);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Santosh Shilimkar | 53 | 100.00% | 1 | 100.00% |
Total | 53 | 100.00% | 1 | 100.00% |
static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
struct ib_wc *wcs)
{
int nr, i;
struct ib_wc *wc;
while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
for (i = 0; i < nr; i++) {
wc = wcs + i;
rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
(unsigned long long)wc->wr_id, wc->status,
wc->byte_len, be32_to_cpu(wc->ex.imm_data));
if (wc->wr_id <= ic->i_send_ring.w_nr ||
wc->wr_id == RDS_IB_ACK_WR_ID)
rds_ib_send_cqe_handler(ic, wc);
else
rds_ib_mr_cqe_handler(ic, wc);
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Santosh Shilimkar | 109 | 80.74% | 3 | 75.00% |
Avinash Repaka | 26 | 19.26% | 1 | 25.00% |
Total | 135 | 100.00% | 4 | 100.00% |
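The branch on wc->wr_id works because the send path partitions the 64-bit wr_id space: send-ring slots use small indices, the single dedicated ACK message uses a sentinel (RDS_IB_ACK_WR_ID, assumed here to be ~(u64)0 as in net/rds/ib.h), and everything else is an FRMR work request. The same classification in isolation:

/* Sketch of the wr_id classification poll_scq() relies on;
 * RDS_IB_ACK_WR_ID is assumed to be (~(u64)0) per net/rds/ib.h. */
enum wc_owner { WC_SEND, WC_ACK, WC_MR };

static enum wc_owner classify_wr_id(u64 wr_id, u32 send_ring_entries)
{
        if (wr_id == RDS_IB_ACK_WR_ID)
                return WC_ACK;          /* the single dedicated ACK WR */
        if (wr_id <= send_ring_entries)
                return WC_SEND;         /* a slot in the send ring */
        return WC_MR;                   /* FRMR registration/invalidation */
}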
static void rds_ib_tasklet_fn_send(unsigned long data)
{
struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
struct rds_connection *conn = ic->conn;
rds_ib_stats_inc(s_ib_tasklet_call);
/* if cq has been already reaped, ignore incoming cq event */
if (atomic_read(&ic->i_cq_quiesce))
return;
poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
if (rds_conn_up(conn) &&
(!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
test_bit(0, &conn->c_map_queued)))
rds_send_xmit(&ic->conn->c_path[0]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Santosh Shilimkar | 120 | 95.24% | 3 | 75.00% |
Sowmini Varadhan | 6 | 4.76% | 1 | 25.00% |
Total | 126 | 100.00% | 4 | 100.00% |
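The poll/notify/poll shape above is deliberate: completions that land after the first drain but before ib_req_notify_cq() re-arms the CQ raise no event, so a second drain is needed to avoid stalling until the next unrelated interrupt. A generic sketch of the idiom (error handling of the notify call omitted):

/* Generic poll -> re-arm -> poll-again idiom for an ib_cq. Completions
 * landing in the window before ib_req_notify_cq() re-arms the CQ raise
 * no event, so a second drain after arming is required. */
static void drain_cq(struct ib_cq *cq, struct ib_wc *wcs, int max)
{
        int nr;

        while ((nr = ib_poll_cq(cq, max, wcs)) > 0)
                ;                       /* handle wcs[0..nr-1] here */
        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        while ((nr = ib_poll_cq(cq, max, wcs)) > 0)
                ;                       /* catch the racing completions */
}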
static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
struct ib_wc *wcs,
struct rds_ib_ack_state *ack_state)
{
int nr, i;
struct ib_wc *wc;
while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
for (i = 0; i < nr; i++) {
wc = wcs + i;
rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
(unsigned long long)wc->wr_id, wc->status,
wc->byte_len, be32_to_cpu(wc->ex.imm_data));
rds_ib_recv_cqe_handler(ic, wc, ack_state);
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Santosh Shilimkar | 116 | 100.00% | 1 | 100.00% |
Total | 116 | 100.00% | 1 | 100.00% |
static void rds_ib_tasklet_fn_recv(unsigned long data)
{
struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
struct rds_connection *conn = ic->conn;
struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
struct rds_ib_ack_state state;
if (!rds_ibdev)
rds_conn_drop(conn);
rds_ib_stats_inc(s_ib_tasklet_call);
/* if cq has been already reaped, ignore incoming cq event */
if (atomic_read(&ic->i_cq_quiesce))
return;
memset(&state, 0, sizeof(state));
poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
if (state.ack_next_valid)
rds_ib_set_ack(ic, state.ack_next, state.ack_required);
if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
rds_send_drop_acked(conn, state.ack_recv, NULL);
ic->i_ack_recv = state.ack_recv;
}
if (rds_conn_up(conn))
rds_ib_attempt_ack(ic);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Santosh Shilimkar | 191 | 100.00% | 4 | 100.00% |
Total | 191 | 100.00% | 4 | 100.00% |
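Note that poll_rcq() only accumulates into the on-stack rds_ib_ack_state scratch area; the tasklet applies a single ACK decision once the CQ is drained. The assumed shape of that structure, per net/rds/ib.h:

/* Assumed layout of the scratch state, per net/rds/ib.h: poll_rcq()
 * fills it in per completion, the tasklet consumes it once at the end. */
struct rds_ib_ack_state {
        u64             ack_next;       /* next sequence we should ack */
        u64             ack_recv;       /* latest ack received from the peer */
        unsigned int    ack_required:1;
        unsigned int    ack_next_valid:1;
        unsigned int    ack_recv_valid:1;
};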
static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
struct rds_connection *conn = data;
struct rds_ib_connection *ic = conn->c_transport_data;
rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
ib_event_msg(event->event));
switch (event->event) {
case IB_EVENT_COMM_EST:
rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
break;
default:
rdsdebug("Fatal QP Event %u (%s) "
"- connection %pI4->%pI4, reconnecting\n",
event->event, ib_event_msg(event->event),
&conn->c_laddr, &conn->c_faddr);
rds_conn_drop(conn);
break;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Grover | 90 | 84.91% | 3 | 60.00% |
Zach Brown | 14 | 13.21% | 1 | 20.00% |
Sagi Grimberg | 2 | 1.89% | 1 | 20.00% |
Total | 106 | 100.00% | 5 | 100.00% |
static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
struct rds_connection *conn = context;
struct rds_ib_connection *ic = conn->c_transport_data;
rdsdebug("conn %p cq %p\n", conn, cq);
rds_ib_stats_inc(s_ib_evt_handler_call);
tasklet_schedule(&ic->i_send_tasklet);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Santosh Shilimkar | 53 | 100.00% | 1 | 100.00% |
Total | 53 | 100.00% | 1 | 100.00% |
static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
{
int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
int index = rds_ibdev->dev->num_comp_vectors - 1;
int i;
for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
if (rds_ibdev->vector_load[i] < min) {
index = i;
min = rds_ibdev->vector_load[i];
}
}
rds_ibdev->vector_load[index]++;
return index;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Santosh Shilimkar | 100 | 100.00% | 1 | 100.00% |
Total | 100 | 100.00% | 1 | 100.00% |
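Together these helpers spread CQs across the device's completion vectors: each new connection takes the least-loaded vector and returns it on teardown (see the error paths in rds_ib_setup_qp() below). A standalone model of the same scan; pick_vector() is a hypothetical stand-in for ibdev_get_unused_vector():

#include <stdio.h>

/* Standalone model of the least-loaded scan above; pick_vector() is a
 * hypothetical stand-in for ibdev_get_unused_vector(). */
static int pick_vector(const int *load, int nvec)
{
        int best = nvec - 1;
        int i;

        for (i = nvec - 1; i >= 0; i--)
                if (load[i] < load[best])
                        best = i;
        return best;
}

int main(void)
{
        int load[4] = { 3, 1, 2, 1 };

        /* Ties keep the highest index scanned first: prints "vector 3" */
        printf("vector %d\n", pick_vector(load, 4));
        return 0;
}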
static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
{
rds_ibdev->vector_load[index]--;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Santosh Shilimkar | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
/*
* This path needs to be careful not to leave IS_ERR pointers around
* for cleanup to trip over.
*/
static int rds_ib_setup_qp(struct rds_connection *conn)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_device *dev = ic->i_cm_id->device;
struct ib_qp_init_attr attr;
struct ib_cq_init_attr cq_attr = {};
struct rds_ib_device *rds_ibdev;
int ret, fr_queue_space;
/*
* It's normal to see a null device if an incoming connection races
* with device removal, so we don't print a warning.
*/
rds_ibdev = rds_ib_get_client_data(dev);
if (!rds_ibdev)
return -EOPNOTSUPP;
/* The fr_queue_space is currently set to 512, to add extra space on
* the completion and send queues. This extra space is used for FRMR
* registration and invalidation work requests.
*/
fr_queue_space = rds_ibdev->use_fastreg ?
(RDS_IB_DEFAULT_FR_WR + 1) +
(RDS_IB_DEFAULT_FR_INV_WR + 1)
: 0;
/* add the conn now so that connection establishment has the dev */
rds_ib_add_conn(rds_ibdev, conn);
if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);
/* Protection domain and memory range */
ic->i_pd = rds_ibdev->pd;
ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
cq_attr.comp_vector = ic->i_scq_vector;
ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
rds_ib_cq_event_handler, conn,
&cq_attr);
if (IS_ERR(ic->i_send_cq)) {
ret = PTR_ERR(ic->i_send_cq);
ic->i_send_cq = NULL;
ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
rdsdebug("ib_create_cq send failed: %d\n", ret);
goto rds_ibdev_out;
}
ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
cq_attr.cqe = ic->i_recv_ring.w_nr;
cq_attr.comp_vector = ic->i_rcq_vector;
ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
rds_ib_cq_event_handler, conn,
&cq_attr);
if (IS_ERR(ic->i_recv_cq)) {
ret = PTR_ERR(ic->i_recv_cq);
ic->i_recv_cq = NULL;
ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
rdsdebug("ib_create_cq recv failed: %d\n", ret);
goto send_cq_out;
}
ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
if (ret) {
rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
goto recv_cq_out;
}
ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
if (ret) {
rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
goto recv_cq_out;
}
/* XXX negotiate max send/recv with remote? */
memset(&attr, 0, sizeof(attr));
attr.event_handler = rds_ib_qp_event_handler;
attr.qp_context = conn;
/* + 1 to allow for the single ack message */
attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
attr.cap.max_send_sge = rds_ibdev->max_sge;
attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
attr.send_cq = ic->i_send_cq;
attr.recv_cq = ic->i_recv_cq;
atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR);
/*
* XXX this can fail if max_*_wr is too large? Are we supposed
* to back off until we get a value that the hardware can support?
*/
ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
if (ret) {
rdsdebug("rdma_create_qp failed: %d\n", ret);
goto recv_cq_out;
}
ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
ic->i_send_ring.w_nr *
sizeof(struct rds_header),
&ic->i_send_hdrs_dma, GFP_KERNEL);
if (!ic->i_send_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent send failed\n");
goto qp_out;
}
ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
ic->i_recv_ring.w_nr *
sizeof(struct rds_header),
&ic->i_recv_hdrs_dma, GFP_KERNEL);
if (!ic->i_recv_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent recv failed\n");
goto send_hdrs_dma_out;
}
ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
&ic->i_ack_dma, GFP_KERNEL);
if (!ic->i_ack) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent ack failed\n");
goto recv_hdrs_dma_out;
}
ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
ibdev_to_node(dev));
if (!ic->i_sends) {
ret = -ENOMEM;
rdsdebug("send allocation failed\n");
goto ack_dma_out;
}
ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
ibdev_to_node(dev));
if (!ic->i_recvs) {
ret = -ENOMEM;
rdsdebug("recv allocation failed\n");
goto sends_out;
}
rds_ib_recv_init_ack(ic);
rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
ic->i_send_cq, ic->i_recv_cq);
return ret;
sends_out:
vfree(ic->i_sends);
ack_dma_out:
ib_dma_free_coherent(dev, sizeof(struct rds_header),
ic->i_ack, ic->i_ack_dma);
recv_hdrs_dma_out:
ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
sizeof(struct rds_header),
ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
send_hdrs_dma_out:
ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
sizeof(struct rds_header),
ic->i_send_hdrs, ic->i_send_hdrs_dma);
qp_out:
rdma_destroy_qp(ic->i_cm_id);
recv_cq_out:
if (!ib_destroy_cq(ic->i_recv_cq))
ic->i_recv_cq = NULL;
send_cq_out:
if (!ib_destroy_cq(ic->i_send_cq))
ic->i_send_cq = NULL;
rds_ibdev_out:
rds_ib_remove_conn(rds_ibdev, conn);
rds_ib_dev_put(rds_ibdev);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Grover | 664 | 68.81% | 3 | 23.08% |
Zhu Yanjun | 150 | 15.54% | 1 | 7.69% |
Santosh Shilimkar | 101 | 10.47% | 5 | 38.46% |
Matan Barak | 32 | 3.32% | 1 | 7.69% |
Zach Brown | 15 | 1.55% | 1 | 7.69% |
Joe Perches | 2 | 0.21% | 1 | 7.69% |
Jason Gunthorpe | 1 | 0.10% | 1 | 7.69% |
Total | 965 | 100.00% | 13 | 100.00% |
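The "IS_ERR pointers" warning above is honored the same way in both CQ-creation paths: extract the errno with PTR_ERR(), then immediately NULL the field so the unwind labels never hand an error-encoded pointer to ib_destroy_cq(). The idiom in isolation (handler names shortened from the real call above):

/* The IS_ERR hygiene the comment above rds_ib_setup_qp() warns about,
 * in isolation: save the errno first, then scrub the field so cleanup
 * code never tests or destroys an error-encoded pointer. */
ic->i_send_cq = ib_create_cq(dev, send_handler, event_handler, conn, &cq_attr);
if (IS_ERR(ic->i_send_cq)) {
        ret = PTR_ERR(ic->i_send_cq);   /* save the errno */
        ic->i_send_cq = NULL;           /* scrub before any goto */
        goto out;
}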
static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
const struct rds_ib_connect_private *dp = event->param.conn.private_data;
u16 common;
u32 version = 0;
/*
* rdma_cm private data is odd - when there is any private data in the
* request, we will be given a pretty large buffer without telling us the
* original size. The only way to tell the difference is by looking at
* the contents, which are initialized to zero.
* If the protocol version fields aren't set, this is a connection attempt
* from an older version. This could be 3.0 or 2.0 - we can't tell.
* We really should have changed this for OFED 1.3 :-(
*/
/* Be paranoid. RDS always has privdata */
if (!event->param.conn.private_data_len) {
printk(KERN_NOTICE "RDS incoming connection has no private data, "
"rejecting\n");
return 0;
}
/* Even if len is crap *now* I still want to check it. -ASG */
if (event->param.conn.private_data_len < sizeof (*dp) ||
dp->dp_protocol_major == 0)
return RDS_PROTOCOL_3_0;
common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
if (dp->dp_protocol_major == 3 && common) {
version = RDS_PROTOCOL_3_0;
while ((common >>= 1) != 0)
version++;
} else
printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
&dp->dp_saddr,
dp->dp_protocol_major,
dp->dp_protocol_minor);
return version;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Grover | 144 | 97.96% | 2 | 50.00% |
Mike Marciniszyn | 2 | 1.36% | 1 | 25.00% |
Manuel Zerpies | 1 | 0.68% | 1 | 25.00% |
Total | 147 | 100.00% | 4 | 100.00% |
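The shift loop converts the intersection of minor-version masks into the highest common protocol: bit N of dp_protocol_minor_mask advertises minor N, and each surviving right shift bumps the minor by one. A runnable model, assuming RDS_IB_SUPPORTED_PROTOCOLS keeps its net/rds/ib.h value of 0x3 (minors 0 and 1):

#include <stdio.h>

/* Model of the minor-version negotiation above: bit N of the mask
 * advertises minor N, the highest common bit wins. The 0x3 value for
 * RDS_IB_SUPPORTED_PROTOCOLS (minors 0 and 1) is assumed from ib.h. */
#define SUPPORTED_MINORS 0x3

static unsigned int negotiate_minor(unsigned int peer_mask)
{
        unsigned int common = peer_mask & SUPPORTED_MINORS;
        unsigned int minor = 0;

        while ((common >>= 1) != 0)
                minor++;                /* index of the highest common bit */
        return minor;
}

int main(void)
{
        /* Prints "3.1 3.0": both minors shared -> 3.1; minor 0 only -> 3.0 */
        printf("3.%u 3.%u\n", negotiate_minor(0x3), negotiate_minor(0x1));
        return 0;
}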
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
const struct rds_ib_connect_private *dp = event->param.conn.private_data;
struct rds_ib_connect_private dp_rep;
struct rds_connection *conn = NULL;
struct rds_ib_connection *ic = NULL;
struct rdma_conn_param conn_param;
u32 version;
int err = 1, destroy = 1;
/* Check whether the remote protocol version matches ours. */
version = rds_ib_protocol_compatible(event);
if (!version)
goto out;
rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
"0x%llx\n", &dp->dp_saddr, &dp->dp_daddr,
RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
(unsigned long long)be64_to_cpu(lguid),
(unsigned long long)be64_to_cpu(fguid));
/* RDS/IB is not currently netns aware, thus init_net */
conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
&rds_ib_transport, GFP_KERNEL);
if (IS_ERR(conn)) {
rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
conn = NULL;
goto out;
}
/*
* The connection request may occur while the
* previous connection exists, e.g. in case of failover.
* But as connections may be initiated simultaneously
* by both hosts, we have a random backoff mechanism -
* see the comment above rds_queue_reconnect()
*/
mutex_lock(&conn->c_cm_lock);
if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
if (rds_conn_state(conn) == RDS_CONN_UP) {
rdsdebug("incoming connect while connecting\n");
rds_conn_drop(conn);
rds_ib_stats_inc(s_ib_listen_closed_stale);
} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
/* Wait and see - our connect may still be succeeding */
rds_ib_stats_inc(s_ib_connect_raced);
}
goto out;
}
ic = conn->c_transport_data;
rds_ib_set_protocol(conn, version);
rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
/* If the peer gave us the last packet it saw, process this as if
* we had received a regular ACK. */
if (dp->dp_ack_seq)
rds_send_drop_acked(