Release 4.12 include/rdma/ib_verbs.h
/*
* Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2004 Infinicon Corporation. All rights reserved.
* Copyright (c) 2004 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#if !defined(IB_VERBS_H)
#define IB_VERBS_H
#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
union ib_gid {
u8 raw[16];
struct {
__be64 subnet_prefix;
__be64 interface_id;
} global;
};
extern union ib_gid zgid;
enum ib_gid_type {
/* If link layer is Ethernet, this is RoCE V1 */
IB_GID_TYPE_IB = 0,
IB_GID_TYPE_ROCE = 0,
IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
IB_GID_TYPE_SIZE
};
#define ROCE_V2_UDP_DPORT 4791
struct ib_gid_attr {
enum ib_gid_type gid_type;
struct net_device *ndev;
};
enum rdma_node_type {
/* IB values map to NodeInfo:NodeType. */
RDMA_NODE_IB_CA = 1,
RDMA_NODE_IB_SWITCH,
RDMA_NODE_IB_ROUTER,
RDMA_NODE_RNIC,
RDMA_NODE_USNIC,
RDMA_NODE_USNIC_UDP,
};
enum {
/* set the locally administered indication */
IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};
enum rdma_transport_type {
RDMA_TRANSPORT_IB,
RDMA_TRANSPORT_IWARP,
RDMA_TRANSPORT_USNIC,
RDMA_TRANSPORT_USNIC_UDP
};
enum rdma_protocol_type {
RDMA_PROTOCOL_IB,
RDMA_PROTOCOL_IBOE,
RDMA_PROTOCOL_IWARP,
RDMA_PROTOCOL_USNIC_UDP
};
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);
enum rdma_network_type {
RDMA_NETWORK_IB,
RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
RDMA_NETWORK_IPV4,
RDMA_NETWORK_IPV6
};
static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
if (network_type == RDMA_NETWORK_IPV4 ||
network_type == RDMA_NETWORK_IPV6)
return IB_GID_TYPE_ROCE_UDP_ENCAP;
/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
return IB_GID_TYPE_IB;
}
static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
union ib_gid *gid)
{
if (gid_type == IB_GID_TYPE_IB)
return RDMA_NETWORK_IB;
if (ipv6_addr_v4mapped((struct in6_addr *)gid))
return RDMA_NETWORK_IPV4;
else
return RDMA_NETWORK_IPV6;
}
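/*
 * Illustrative sketch (not part of the original header): round-tripping a
 * RoCE v2 IPv4 GID through the two helpers above. The GID value is a
 * hypothetical v4-mapped address chosen for the example.
 */
static inline void example_gid_round_trip(void)
{
	union ib_gid gid = { .raw = { 0, 0, 0, 0, 0, 0, 0, 0,
				      0, 0, 0xff, 0xff, 192, 168, 1, 10 } };
	enum rdma_network_type nt;
	enum ib_gid_type gt;

	/* v4-mapped GID with a RoCE v2 gid_type yields RDMA_NETWORK_IPV4 */
	nt = ib_gid_to_network_type(IB_GID_TYPE_ROCE_UDP_ENCAP, &gid);
	/* IPv4/IPv6 network types map back to the RoCE v2 gid_type */
	gt = ib_network_to_gid_type(nt);
	(void)gt;
}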
enum rdma_link_layer {
IB_LINK_LAYER_UNSPECIFIED,
IB_LINK_LAYER_INFINIBAND,
IB_LINK_LAYER_ETHERNET,
};
enum ib_device_cap_flags {
IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
IB_DEVICE_RAW_MULTI = (1 << 3),
IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
IB_DEVICE_INIT_TYPE = (1 << 9),
IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
IB_DEVICE_SRQ_RESIZE = (1 << 13),
IB_DEVICE_N_NOTIFY_CQ = (1 << 14),
/*
* This device supports a per-device lkey or stag that can be
* used without performing a memory registration for the local
* memory. Note that ULPs should never check this flag, but
* instead use the local_dma_lkey field in the ib_pd structure,
* which will always contain a usable lkey.
*/
IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16),
IB_DEVICE_MEM_WINDOW = (1 << 17),
/*
* Devices should set IB_DEVICE_UD_IP_CSUM if they support
* insertion of UDP and TCP checksum on outgoing UD IPoIB
* messages and can verify the validity of checksum for
* incoming messages. Setting this flag implies that the
* IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
*/
IB_DEVICE_UD_IP_CSUM = (1 << 18),
IB_DEVICE_UD_TSO = (1 << 19),
IB_DEVICE_XRC = (1 << 20),
/*
* This device supports the IB "base memory management extension",
* which includes support for fast registrations (IB_WR_REG_MR,
* IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
* also be set by any iWarp device which must support FRs to comply
* with the iWarp verbs spec. iWarp devices also support the
* IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
* stag.
*/
IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
IB_DEVICE_RC_IP_CSUM = (1 << 25),
/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
IB_DEVICE_RAW_IP_CSUM = (1 << 26),
/*
* Devices should set IB_DEVICE_CROSS_CHANNEL if they
* support execution of WQEs that involve synchronization
* of I/O operations with a single completion queue managed
* by hardware.
*/
IB_DEVICE_CROSS_CHANNEL = (1 << 27),
IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
};
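/*
 * Hedged sketch of the IB_DEVICE_LOCAL_DMA_LKEY guidance above: a ULP
 * fills an SGE from pd->local_dma_lkey instead of testing the cap flag;
 * the core arranges a usable lkey either way. struct ib_pd and struct
 * ib_sge are defined later in this header; the function name and the
 * dma_addr/len parameters are invented for illustration.
 */
static inline void example_build_sge(struct ib_pd *pd, u64 dma_addr, u32 len,
				     struct ib_sge *sge)
{
	sge->addr   = dma_addr;	/* DMA address from a prior mapping */
	sge->length = len;
	sge->lkey   = pd->local_dma_lkey;
}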
enum ib_signature_prot_cap {
IB_PROT_T10DIF_TYPE_1 = 1,
IB_PROT_T10DIF_TYPE_2 = 1 << 1,
IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};
enum ib_signature_guard_cap {
IB_GUARD_T10DIF_CRC = 1,
IB_GUARD_T10DIF_CSUM = 1 << 1,
};
enum ib_atomic_cap {
IB_ATOMIC_NONE,
IB_ATOMIC_HCA,
IB_ATOMIC_GLOB
};
enum ib_odp_general_cap_bits {
IB_ODP_SUPPORT = 1 << 0,
IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};
enum ib_odp_transport_cap_bits {
IB_ODP_SUPPORT_SEND = 1 << 0,
IB_ODP_SUPPORT_RECV = 1 << 1,
IB_ODP_SUPPORT_WRITE = 1 << 2,
IB_ODP_SUPPORT_READ = 1 << 3,
IB_ODP_SUPPORT_ATOMIC = 1 << 4,
};
struct ib_odp_caps {
uint64_t general_caps;
struct {
uint32_t rc_odp_caps;
uint32_t uc_odp_caps;
uint32_t ud_odp_caps;
} per_transport_caps;
};
struct ib_rss_caps {
/* Corresponding bit will be set if qp type from
* 'enum ib_qp_type' is supported, e.g.
* supported_qpts |= 1 << IB_QPT_UD
*/
u32 supported_qpts;
u32 max_rwq_indirection_tables;
u32 max_rwq_indirection_table_size;
};
enum ib_cq_creation_flags {
IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
};
struct ib_cq_init_attr {
unsigned int cqe;
int comp_vector;
u32 flags;
};
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
u64 max_mr_size;
u64 page_size_cap;
u32 vendor_id;
u32 vendor_part_id;
u32 hw_ver;
int max_qp;
int max_qp_wr;
u64 device_cap_flags;
int max_sge;
int max_sge_rd;
int max_cq;
int max_cqe;
int max_mr;
int max_pd;
int max_qp_rd_atom;
int max_ee_rd_atom;
int max_res_rd_atom;
int max_qp_init_rd_atom;
int max_ee_init_rd_atom;
enum ib_atomic_cap atomic_cap;
enum ib_atomic_cap masked_atomic_cap;
int max_ee;
int max_rdd;
int max_mw;
int max_raw_ipv6_qp;
int max_raw_ethy_qp;
int max_mcast_grp;
int max_mcast_qp_attach;
int max_total_mcast_qp_attach;
int max_ah;
int max_fmr;
int max_map_per_fmr;
int max_srq;
int max_srq_wr;
int max_srq_sge;
unsigned int max_fast_reg_page_list_len;
u16 max_pkeys;
u8 local_ca_ack_delay;
int sig_prot_cap;
int sig_guard_cap;
struct ib_odp_caps odp_caps;
uint64_t timestamp_mask;
uint64_t hca_core_clock; /* in kHz */
struct ib_rss_caps rss_caps;
u32 max_wq_type_rq;
u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
};
enum ib_mtu {
IB_MTU_256 = 1,
IB_MTU_512 = 2,
IB_MTU_1024 = 3,
IB_MTU_2048 = 4,
IB_MTU_4096 = 5
};
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
switch (mtu) {
case IB_MTU_256: return 256;
case IB_MTU_512: return 512;
case IB_MTU_1024: return 1024;
case IB_MTU_2048: return 2048;
case IB_MTU_4096: return 4096;
default: return -1;
}
}
static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
if (mtu >= 4096)
return IB_MTU_4096;
else if (mtu >= 2048)
return IB_MTU_2048;
else if (mtu >= 1024)
return IB_MTU_1024;
else if (mtu >= 512)
return IB_MTU_512;
else
return IB_MTU_256;
}
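/*
 * Illustrative helper (hypothetical): clamping a byte count to the IB MTU
 * enum and back with the two converters above, e.g. 1500 -> IB_MTU_1024
 * -> 1024.
 */
static inline int example_clamp_mtu(int mtu_bytes)
{
	return ib_mtu_enum_to_int(ib_mtu_int_to_enum(mtu_bytes));
}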
enum ib_port_state {
IB_PORT_NOP = 0,
IB_PORT_DOWN = 1,
IB_PORT_INIT = 2,
IB_PORT_ARMED = 3,
IB_PORT_ACTIVE = 4,
IB_PORT_ACTIVE_DEFER = 5
};
enum ib_port_cap_flags {
IB_PORT_SM = 1 << 1,
IB_PORT_NOTICE_SUP = 1 << 2,
IB_PORT_TRAP_SUP = 1 << 3,
IB_PORT_OPT_IPD_SUP = 1 << 4,
IB_PORT_AUTO_MIGR_SUP = 1 << 5,
IB_PORT_SL_MAP_SUP = 1 << 6,
IB_PORT_MKEY_NVRAM = 1 << 7,
IB_PORT_PKEY_NVRAM = 1 << 8,
IB_PORT_LED_INFO_SUP = 1 << 9,
IB_PORT_SM_DISABLED = 1 << 10,
IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
IB_PORT_CM_SUP = 1 << 16,
IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
IB_PORT_REINIT_SUP = 1 << 18,
IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
IB_PORT_DR_NOTICE_SUP = 1 << 21,
IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
IB_PORT_BOOT_MGMT_SUP = 1 << 23,
IB_PORT_LINK_LATENCY_SUP = 1 << 24,
IB_PORT_CLIENT_REG_SUP = 1 << 25,
IB_PORT_IP_BASED_GIDS = 1 << 26,
};
enum ib_port_width {
IB_WIDTH_1X = 1,
IB_WIDTH_4X = 2,
IB_WIDTH_8X = 4,
IB_WIDTH_12X = 8
};
static inline int ib_width_enum_to_int(enum ib_port_width width)
{
switch (width) {
case IB_WIDTH_1X: return 1;
case IB_WIDTH_4X: return 4;
case IB_WIDTH_8X: return 8;
case IB_WIDTH_12X: return 12;
default: return -1;
}
}
enum ib_port_speed {
IB_SPEED_SDR = 1,
IB_SPEED_DDR = 2,
IB_SPEED_QDR = 4,
IB_SPEED_FDR10 = 8,
IB_SPEED_FDR = 16,
IB_SPEED_EDR = 32,
IB_SPEED_HDR = 64
};
/**
* struct rdma_hw_stats
* @timestamp - Used by the core code to track when the last update was
* @lifespan - Used by the core code to determine how old the counters
* should be before being updated again. Stored in jiffies, defaults
* to 10 milliseconds, drivers can override the default by specifying
* their own value during their allocation routine.
* @names - Array of pointers to static names used for the counters in
* the directory.
* @num_counters - How many hardware counters there are. If names is
* shorter than this number, a kernel oops will result. Driver authors
* are encouraged to leave a BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
* in their code to prevent this.
* @value - Array of u64 counters that are accessed by the sysfs code and
* filled in by the driver's get_stats routine
*/
struct rdma_hw_stats {
unsigned long timestamp;
unsigned long lifespan;
const char * const *names;
int num_counters;
u64 value[];
};
#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
* rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
* for drivers.
* @names - Array of static const char *
* @num_counters - How many elements are in the array
* @lifespan - How many milliseconds between updates
*/
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
const char * const *names, int num_counters,
unsigned long lifespan)
{
struct rdma_hw_stats *stats;
stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
GFP_KERNEL);
if (!stats)
return NULL;
stats->names = names;
stats->num_counters = num_counters;
stats->lifespan = msecs_to_jiffies(lifespan);
return stats;
}
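/*
 * Hypothetical driver-side usage, following the struct rdma_hw_stats kdoc
 * above: keep a BUILD_BUG_ON so the names array can never be shorter than
 * num_counters. The counter names and callback name are invented for the
 * example.
 */
#define EXAMPLE_NUM_COUNTERS 2

static const char * const example_counter_names[] = {
	"rx_packets",
	"tx_packets",
};

static struct rdma_hw_stats *example_alloc_hw_stats(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(example_counter_names) < EXAMPLE_NUM_COUNTERS);

	return rdma_alloc_hw_stats_struct(example_counter_names,
					  EXAMPLE_NUM_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}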
/* Bits describing the various kinds of functionality the core must
 * support for this port.
 */
/* Management 0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD 0x00000001
#define RDMA_CORE_CAP_IB_SMI 0x00000002
#define RDMA_CORE_CAP_IB_CM 0x00000004
#define RDMA_CORE_CAP_IW_CM 0x00000008
#define RDMA_CORE_CAP_IB_SA 0x00000010
#define RDMA_CORE_CAP_OPA_MAD 0x00000020
/* Address format 0x000FF000 */
#define RDMA_CORE_CAP_AF_IB 0x00001000
#define RDMA_CORE_CAP_ETH_AH 0x00002000
#define RDMA_CORE_CAP_OPA_AH 0x00004000
/* Protocol 0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB 0x00100000
#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
#define RDMA_CORE_CAP_PROT_USNIC 0x02000000
#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
| RDMA_CORE_CAP_IB_MAD \
| RDMA_CORE_CAP_IB_SMI \
| RDMA_CORE_CAP_IB_CM \
| RDMA_CORE_CAP_IB_SA \
| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
| RDMA_CORE_CAP_IB_MAD \
| RDMA_CORE_CAP_IB_CM \
| RDMA_CORE_CAP_AF_IB \
| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
| RDMA_CORE_CAP_IB_MAD \
| RDMA_CORE_CAP_IB_CM \
| RDMA_CORE_CAP_AF_IB \
| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
| RDMA_CORE_CAP_OPA_MAD)
#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
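/*
 * Hedged sketch of how a driver typically consumes the RDMA_CORE_PORT_*
 * composites above, in its get_port_immutable() callback. struct
 * ib_port_immutable and ib_query_port() appear later in this header;
 * IB_MGMT_MAD_SIZE comes from <rdma/ib_mad.h>. The function is invented
 * to illustrate a port supporting both RoCE v1 and v2.
 */
static int example_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}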
struct ib_port_attr {
u64 subnet_prefix;
enum ib_port_state state;
enum ib_mtu max_mtu;
enum ib_mtu active_mtu;
int gid_tbl_len;
u32 port_cap_flags;
u32 max_msg_sz;
u32 bad_pkey_cntr;
u32 qkey_viol_cntr;
u16 pkey_tbl_len;
u16 lid;
u16 sm_lid;
u8 lmc;
u8 max_vl_num;
u8 sm_sl;
u8 subnet_timeout;
u8 init_type_reply;
u8 active_width;
u8 active_speed;
u8 phys_state;
bool grh_required;
};
enum ib_device_modify_flags {
IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};
#define IB_DEVICE_NODE_DESC_MAX 64
struct ib_device_modify {
u64 sys_image_guid;
char node_desc[IB_DEVICE_NODE_DESC_MAX];
};
enum ib_port_modify_flags {
IB_PORT_SHUTDOWN = 1,
IB_PORT_INIT_TYPE = (1<<2),
IB_PORT_RESET_QKEY_CNTR = (1<<3)
};
struct ib_port_modify {
u32 set_port_cap_mask;
u32 clr_port_cap_mask;
u8 init_type;
};
enum ib_event_type {
IB_EVENT_CQ_ERR,
IB_EVENT_QP_FATAL,
IB_EVENT_QP_REQ_ERR,
IB_EVENT_QP_ACCESS_ERR,
IB_EVENT_COMM_EST,
IB_EVENT_SQ_DRAINED,
IB_EVENT_PATH_MIG,
IB_EVENT_PATH_MIG_ERR,
IB_EVENT_DEVICE_FATAL,
IB_EVENT_PORT_ACTIVE,
IB_EVENT_PORT_ERR,
IB_EVENT_LID_CHANGE,
IB_EVENT_PKEY_CHANGE,
IB_EVENT_SM_CHANGE,
IB_EVENT_SRQ_ERR,
IB_EVENT_SRQ_LIMIT_REACHED,
IB_EVENT_QP_LAST_WQE_REACHED,
IB_EVENT_CLIENT_REREGISTER,
IB_EVENT_GID_CHANGE,
IB_EVENT_WQ_FATAL,
};
const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
struct ib_event {
struct ib_device *device;
union {
struct ib_cq *cq;
struct ib_qp *qp;
struct ib_srq *srq;
struct ib_wq *wq;
u8 port_num;
} element;
enum ib_event_type event;
};
struct ib_event_handler {
struct ib_device *device;
void (*handler)(struct ib_event_handler *, struct ib_event *);
struct list_head list;
};
#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
do { \
(_ptr)->device = _device; \
(_ptr)->handler = _handler; \
INIT_LIST_HEAD(&(_ptr)->list); \
} while (0)
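/*
 * Hypothetical use of INIT_IB_EVENT_HANDLER: register an async event
 * callback that reacts to port-up events. ib_register_event_handler()
 * is declared further down in this header.
 */
static void example_event_cb(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE)
		pr_info("%s: port %d active\n", event->device->name,
			event->element.port_num);
}

static struct ib_event_handler example_handler;

static void example_register_events(struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(&example_handler, device, example_event_cb);
	ib_register_event_handler(&example_handler);
}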
struct ib_global_route {
union ib_gid dgid;
u32 flow_label;
u8 sgid_index;
u8 hop_limit;
u8 traffic_class;
};
struct ib_grh {
__be32 version_tclass_flow;
__be16 paylen;
u8 next_hdr;
u8 hop_limit;
union ib_gid sgid;
union ib_gid dgid;
};
union rdma_network_hdr {
struct ib_grh ibgrh;
struct {
/* The IB spec states that if the packet carries IPv4, the
 * IPv4 header occupies the last 20 bytes of the GRH space.
 */
u8 reserved[20];
struct iphdr roce4grh;
};
};
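/*
 * Illustrative accessor (hypothetical): for a received RoCE v2 IPv4
 * packet, the iphdr overlays the last 20 bytes of the 40-byte GRH
 * space, per the comment above.
 */
static inline __be32 example_roce4_saddr(const union rdma_network_hdr *hdr)
{
	return hdr->roce4grh.saddr;
}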
enum {
IB_MULTICAST_QPN = 0xffffff
};
#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
enum ib_ah_flags {
IB_AH_GRH = 1
};
enum ib_rate {
IB_RATE_PORT_CURRENT = 0,
IB_RATE_2_5_GBPS = 2,
IB_RATE_5_GBPS = 5,
IB_RATE_10_GBPS = 3,
IB_RATE_20_GBPS = 6,
IB_RATE_30_GBPS = 4,
IB_RATE_40_GBPS = 7,
IB_RATE_60_GBPS = 8,
IB_RATE_80_GBPS = 9,
IB_RATE_120_GBPS = 10,
IB_RATE_14_GBPS = 11,
IB_RATE_56_GBPS = 12,
IB_RATE_112_GBPS = 13,
IB_RATE_168_GBPS = 14,
IB_RATE_25_GBPS = 15,
IB_RATE_100_GBPS = 16,
IB_RATE_200_GBPS = 17,
IB_RATE_300_GBPS = 18
};
/**
* ib_rate_to_mult - Convert the IB rate enum to a multiple of the
* base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
* converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
* @rate: rate to convert.
*/
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
/**
* ib_rate_to_mbps - Convert the IB rate enum to Mbps.
* For example, IB_RATE_2_5_GBPS will be converted to 2500.
* @rate: rate to convert.
*/
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
/**
* enum ib_mr_type - memory region type
* @IB_MR_TYPE_MEM_REG: memory region that is used for
* normal registration
* @IB_MR_TYPE_SIGNATURE: memory region that is used for
* signature operations (data-integrity
* capable regions)
* @IB_MR_TYPE_SG_GAPS: memory region that is capable to
* register any arbitrary sg lists (without
* the normal mr constraints - see
* ib_map_mr_sg)
*/
enum ib_mr_type {
IB_MR_TYPE_MEM_REG,
IB_MR_TYPE_SIGNATURE,
IB_MR_TYPE_SG_GAPS,
};
/**
* Signature types
* IB_SIG_TYPE_NONE: Unprotected.
* IB_SIG_TYPE_T10_DIF: Type T10-DIF
*/
enum ib_signature_type {
IB_SIG_TYPE_NONE,
IB_SIG_TYPE_T10_DIF,
};
/**
* Signature T10-DIF block-guard types
* IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
* IB_T10DIF_CSUM: Corresponds to IP checksum rules.
*/
enum ib_t10_dif_bg_type {
IB_T10DIF_CRC,
IB_T10DIF_CSUM
};
/**
* struct ib_t10_dif_domain - Parameters specific for T10-DIF
* domain.
* @bg_type: T10-DIF block guard type (CRC|CSUM)
* @pi_interval: protection information interval.
* @bg: seed of guard computation.
* @app_tag: application tag of guard block
* @ref_tag: initial guard block reference tag.
* @ref_remap: Indicates whether the reftag increments with each block
* @app_escape: Indicates that the block check is skipped if apptag=0xffff
* @ref_escape: Indicates that the block check is skipped if reftag=0xffffffff
* @apptag_check_mask: check bitmask of application tag.
*/
struct ib_t10_dif_domain {
enum ib_t10_dif_bg_type bg_type;
u16 pi_interval;
u16 bg;
u16 app_tag;
u32 ref_tag;
bool ref_remap;
bool app_escape;
bool ref_escape;
u16 apptag_check_mask;
};
/**
* struct ib_sig_domain - Parameters for signature domain
* @sig_type: specific signature type
* @sig: union of all signature domain attributes that may
* be used to set domain layout.
*/
struct ib_sig_domain {
enum ib_signature_type sig_type;
union {
struct ib_t10_dif_domain dif;
} sig;
};
/**
* struct ib_sig_attrs - Parameters for signature handover operation
* @check_mask: bitmask for signature byte check (8 bytes)
* @mem: memory domain layout descriptor.
* @wire: wire domain layout descriptor.
*/
struct ib_sig_attrs {
u8 check_mask;
struct ib_sig_domain mem;
struct ib_sig_domain wire;
};
enum ib_sig_err_type {
IB_SIG_BAD_GUARD,
IB_SIG_BAD_REFTAG,
IB_SIG_BAD_APPTAG,
};
/**
* struct ib_sig_err - signature error descriptor
*/
struct ib_sig_err {
enum ib_sig_err_type err_type;
u32 expected;
u32 actual;
u64 sig_err_offset;
u32 key;
};
enum ib_mr_status_check {
IB_MR_CHECK_SIG_STATUS = 1,
};
/**
* struct ib_mr_status - Memory region status container
*
* @fail_status: Bitmask of MR checks status. For each
* failed check a corresponding status bit is set.
* @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
* failure.
*/
struct ib_mr_status {
u32 fail_status;
struct ib_sig_err sig_err;
};
/**
* mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
* enum.
* @mult: multiple to convert.
*/
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
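/*
 * Worked illustration of the three rate converters, with values taken
 * from their kdoc comments; the function itself is hypothetical.
 */
static inline void example_rate_conversions(void)
{
	int mult = ib_rate_to_mult(IB_RATE_5_GBPS);	/* 2 (5 Gb/s = 2 x 2.5 Gb/s) */
	int mbps = ib_rate_to_mbps(IB_RATE_2_5_GBPS);	/* 2500 */
	enum ib_rate rate = mult_to_ib_rate(mult);	/* IB_RATE_5_GBPS again */

	(void)mbps;
	(void)rate;
}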
enum rdma_ah_attr_type {
RDMA_AH_ATTR_TYPE_IB,
RDMA_AH_ATTR_TYPE_ROCE,
RDMA_AH_ATTR_TYPE_OPA,
};
struct ib_ah_attr {
u16 dlid;
u8 src_path_bits;
};
struct roce_ah_attr {
u8 dmac[ETH_ALEN];
};
struct opa_ah_attr {
u32 dlid;
u8 src_path_bits;
};
struct rdma_ah_attr {
struct ib_global_route grh;
u8 sl;
u8 static_rate;
u8 port_num;
u8 ah_flags;
enum rdma_ah_attr_type type;
union {
struct ib_ah_attr ib;
struct roce_ah_attr roce;
struct opa_ah_attr opa;
};
};
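/*
 * Minimal sketch (hypothetical helper): initializing an IB-type address
 * handle attribute directly from the fields above. A LID-routed AH needs
 * no GRH, so ah_flags is left clear.
 */
static inline void example_init_ib_ah_attr(struct rdma_ah_attr *attr,
					   u8 port_num, u16 dlid, u8 sl)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = RDMA_AH_ATTR_TYPE_IB;
	attr->port_num = port_num;
	attr->sl = sl;
	attr->ib.dlid = dlid;
}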
enum ib_wc_status {
IB_WC_SUCCESS,
IB_WC_LOC_LEN_ERR,
IB_WC_LOC_QP_OP_ERR,
IB_WC_LOC_EEC_OP_ERR,
IB_WC_LOC_PROT_ERR,
IB_WC_WR_FLUSH_ERR,
IB_WC_MW_BIND_ERR,
IB_WC_BAD_RESP_ERR,
IB_WC_LOC_ACCESS_ERR,
IB_WC_REM_INV_REQ_ERR,
IB_WC_REM_ACCESS_ERR,
IB_WC_REM_OP_ERR,
IB_WC_RETRY_EXC_ERR,
IB_WC_RNR_RETRY_EXC_ERR,
IB_WC_LOC_RDD_VIOL_ERR,
IB_WC_REM_INV_RD_REQ_ERR,
IB_WC_REM_ABORT_ERR,
IB_WC_INV_EECN_ERR,
IB_WC_INV_EEC_STATE_ERR,
IB_WC_FATAL_ERR,
IB_WC_RESP_TIMEOUT_ERR,
IB_WC_GENERAL_ERR
};
const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
enum ib_wc_opcode {
IB_WC_SEND,
IB_WC_RDMA_WRITE,
IB_WC_RDMA_READ,
IB_WC_COMP_SWAP,
IB_WC_FETCH_ADD,
IB_WC_LSO,
IB_WC_LOCAL_INV,
IB_WC_REG_MR,
IB_WC_MASKED_COMP_SWAP,
IB_WC_MASKED_FETCH_ADD,
/*
* Set value of IB_WC_RECV so consumers can test if a completion is a
* receive by testing (opcode & IB_WC_RECV).
*/
IB_WC_RECV = 1 << 7,
IB_WC_RECV_RDMA_WITH_IMM
};
enum ib_wc_flags {
IB_WC_GRH = 1,
IB_WC_WITH_IMM = (1<<1),
IB_WC_WITH_INVALIDATE = (1<<2),
IB_WC_IP_CSUM_OK = (1<<3),
IB_WC_WITH_SMAC = (1<<4),
IB_WC_WITH_VLAN = (1<<5),
IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
};
struct ib_wc {
union {
u64 wr_id;
struct ib_cqe *wr_cqe;
};
enum ib_wc_status status;
enum ib_wc_opcode opcode;
u32 vendor_err;
u32 byte_len;
struct ib_qp *qp;
union {
__be32 imm_data;
u32 invalidate_rkey;