2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
39 #include <linux/errno.h>
40 #include <linux/err.h>
41 #include <linux/export.h>
42 #include <linux/string.h>
43 #include <linux/slab.h>
45 #include <linux/in6.h>
46 #include <net/addrconf.h>
47 #include <linux/security.h>
49 #include <rdma/ib_verbs.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/ib_addr.h>
54 #include "core_priv.h"
56 static int ib_resolve_eth_dmac(struct ib_device *device,
57 struct rdma_ah_attr *ah_attr);
59 static const char * const ib_events[] = {
60 [IB_EVENT_CQ_ERR] = "CQ error",
61 [IB_EVENT_QP_FATAL] = "QP fatal error",
62 [IB_EVENT_QP_REQ_ERR] = "QP request error",
63 [IB_EVENT_QP_ACCESS_ERR] = "QP access error",
64 [IB_EVENT_COMM_EST] = "communication established",
65 [IB_EVENT_SQ_DRAINED] = "send queue drained",
66 [IB_EVENT_PATH_MIG] = "path migration successful",
67 [IB_EVENT_PATH_MIG_ERR] = "path migration error",
68 [IB_EVENT_DEVICE_FATAL] = "device fatal error",
69 [IB_EVENT_PORT_ACTIVE] = "port active",
70 [IB_EVENT_PORT_ERR] = "port error",
71 [IB_EVENT_LID_CHANGE] = "LID change",
72 [IB_EVENT_PKEY_CHANGE] = "P_key change",
73 [IB_EVENT_SM_CHANGE] = "SM change",
74 [IB_EVENT_SRQ_ERR] = "SRQ error",
75 [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
76 [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
77 [IB_EVENT_CLIENT_REREGISTER] = "client reregister",
78 [IB_EVENT_GID_CHANGE] = "GID changed",
81 const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
85 return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
86 ib_events[index] : "unrecognized event";
88 EXPORT_SYMBOL(ib_event_msg);
90 static const char * const wc_statuses[] = {
91 [IB_WC_SUCCESS] = "success",
92 [IB_WC_LOC_LEN_ERR] = "local length error",
93 [IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
94 [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
95 [IB_WC_LOC_PROT_ERR] = "local protection error",
96 [IB_WC_WR_FLUSH_ERR] = "WR flushed",
97 [IB_WC_MW_BIND_ERR] = "memory management operation error",
98 [IB_WC_BAD_RESP_ERR] = "bad response error",
99 [IB_WC_LOC_ACCESS_ERR] = "local access error",
100 [IB_WC_REM_INV_REQ_ERR] = "invalid request error",
101 [IB_WC_REM_ACCESS_ERR] = "remote access error",
102 [IB_WC_REM_OP_ERR] = "remote operation error",
103 [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
104 [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
105 [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
106 [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
107 [IB_WC_REM_ABORT_ERR] = "operation aborted",
108 [IB_WC_INV_EECN_ERR] = "invalid EE context number",
109 [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
110 [IB_WC_FATAL_ERR] = "fatal error",
111 [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
112 [IB_WC_GENERAL_ERR] = "general error",
115 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
117 size_t index = status;
119 return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
120 wc_statuses[index] : "unrecognized status";
122 EXPORT_SYMBOL(ib_wc_status_msg);
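/*
 * Usage sketch (added for exposition, not part of the original source): a
 * consumer of the two message helpers above typically logs unexpected
 * completions from its poll loop; "cq" is an assumed, already created CQ.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			pr_err("wr_id %llu failed: %s\n",
 *			       (unsigned long long)wc.wr_id,
 *			       ib_wc_status_msg(wc.status));
 *	}
 */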
124 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
127 case IB_RATE_2_5_GBPS: return 1;
128 case IB_RATE_5_GBPS: return 2;
129 case IB_RATE_10_GBPS: return 4;
130 case IB_RATE_20_GBPS: return 8;
131 case IB_RATE_30_GBPS: return 12;
132 case IB_RATE_40_GBPS: return 16;
133 case IB_RATE_60_GBPS: return 24;
134 case IB_RATE_80_GBPS: return 32;
135 case IB_RATE_120_GBPS: return 48;
136 case IB_RATE_14_GBPS: return 6;
137 case IB_RATE_56_GBPS: return 22;
138 case IB_RATE_112_GBPS: return 45;
139 case IB_RATE_168_GBPS: return 67;
140 case IB_RATE_25_GBPS: return 10;
141 case IB_RATE_100_GBPS: return 40;
142 case IB_RATE_200_GBPS: return 80;
143 case IB_RATE_300_GBPS: return 120;
144 case IB_RATE_28_GBPS: return 11;
145 case IB_RATE_50_GBPS: return 20;
146 case IB_RATE_400_GBPS: return 160;
147 case IB_RATE_600_GBPS: return 240;
151 EXPORT_SYMBOL(ib_rate_to_mult);
153 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
156 case 1: return IB_RATE_2_5_GBPS;
157 case 2: return IB_RATE_5_GBPS;
158 case 4: return IB_RATE_10_GBPS;
159 case 8: return IB_RATE_20_GBPS;
160 case 12: return IB_RATE_30_GBPS;
161 case 16: return IB_RATE_40_GBPS;
162 case 24: return IB_RATE_60_GBPS;
163 case 32: return IB_RATE_80_GBPS;
164 case 48: return IB_RATE_120_GBPS;
165 case 6: return IB_RATE_14_GBPS;
166 case 22: return IB_RATE_56_GBPS;
167 case 45: return IB_RATE_112_GBPS;
168 case 67: return IB_RATE_168_GBPS;
169 case 10: return IB_RATE_25_GBPS;
170 case 40: return IB_RATE_100_GBPS;
171 case 80: return IB_RATE_200_GBPS;
172 case 120: return IB_RATE_300_GBPS;
173 case 11: return IB_RATE_28_GBPS;
174 case 20: return IB_RATE_50_GBPS;
175 case 160: return IB_RATE_400_GBPS;
176 case 240: return IB_RATE_600_GBPS;
177 default: return IB_RATE_PORT_CURRENT;
180 EXPORT_SYMBOL(mult_to_ib_rate);
182 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
185 case IB_RATE_2_5_GBPS: return 2500;
186 case IB_RATE_5_GBPS: return 5000;
187 case IB_RATE_10_GBPS: return 10000;
188 case IB_RATE_20_GBPS: return 20000;
189 case IB_RATE_30_GBPS: return 30000;
190 case IB_RATE_40_GBPS: return 40000;
191 case IB_RATE_60_GBPS: return 60000;
192 case IB_RATE_80_GBPS: return 80000;
193 case IB_RATE_120_GBPS: return 120000;
194 case IB_RATE_14_GBPS: return 14062;
195 case IB_RATE_56_GBPS: return 56250;
196 case IB_RATE_112_GBPS: return 112500;
197 case IB_RATE_168_GBPS: return 168750;
198 case IB_RATE_25_GBPS: return 25781;
199 case IB_RATE_100_GBPS: return 103125;
200 case IB_RATE_200_GBPS: return 206250;
201 case IB_RATE_300_GBPS: return 309375;
202 case IB_RATE_28_GBPS: return 28125;
203 case IB_RATE_50_GBPS: return 53125;
204 case IB_RATE_400_GBPS: return 425000;
205 case IB_RATE_600_GBPS: return 637500;
209 EXPORT_SYMBOL(ib_rate_to_mbps);
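/*
 * Worked example (added for exposition): the three helpers above convert
 * between the IB_RATE_* encoding, the multiple of the 2.5 Gb/s base rate,
 * and an approximate Mb/s value. Using the tables above:
 *
 *	ib_rate_to_mult(IB_RATE_100_GBPS)  == 40
 *	mult_to_ib_rate(40)                == IB_RATE_100_GBPS
 *	ib_rate_to_mbps(IB_RATE_100_GBPS)  == 103125
 */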
211 __attribute_const__ enum rdma_transport_type
212 rdma_node_get_transport(unsigned int node_type)
215 if (node_type == RDMA_NODE_USNIC)
216 return RDMA_TRANSPORT_USNIC;
217 if (node_type == RDMA_NODE_USNIC_UDP)
218 return RDMA_TRANSPORT_USNIC_UDP;
219 if (node_type == RDMA_NODE_RNIC)
220 return RDMA_TRANSPORT_IWARP;
221 if (node_type == RDMA_NODE_UNSPECIFIED)
222 return RDMA_TRANSPORT_UNSPECIFIED;
224 return RDMA_TRANSPORT_IB;
226 EXPORT_SYMBOL(rdma_node_get_transport);
228 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
230 enum rdma_transport_type lt;
231 if (device->ops.get_link_layer)
232 return device->ops.get_link_layer(device, port_num);
234 lt = rdma_node_get_transport(device->node_type);
235 if (lt == RDMA_TRANSPORT_IB)
236 return IB_LINK_LAYER_INFINIBAND;
238 return IB_LINK_LAYER_ETHERNET;
240 EXPORT_SYMBOL(rdma_port_get_link_layer);
242 /* Protection domains */
245 * ib_alloc_pd - Allocates an unused protection domain.
246 * @device: The device on which to allocate the protection domain.
247 * @flags: protection domain flags
248 * @caller: caller's build-time module name
250 * A protection domain object provides an association between QPs, shared
251 * receive queues, address handles, memory regions, and memory windows.
253 * Every PD has a local_dma_lkey which can be used as the lkey value for local
256 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
260 int mr_access_flags = 0;
263 pd = rdma_zalloc_drv_obj(device, ib_pd);
265 return ERR_PTR(-ENOMEM);
269 pd->__internal_mr = NULL;
270 atomic_set(&pd->usecnt, 0);
273 pd->res.type = RDMA_RESTRACK_PD;
274 rdma_restrack_set_task(&pd->res, caller);
276 ret = device->ops.alloc_pd(pd, NULL);
281 rdma_restrack_kadd(&pd->res);
283 if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
284 pd->local_dma_lkey = device->local_dma_lkey;
286 mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
288 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
289 pr_warn("%s: enabling unsafe global rkey\n", caller);
290 mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
293 if (mr_access_flags) {
296 mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
302 mr->device = pd->device;
304 mr->type = IB_MR_TYPE_DMA;
306 mr->need_inval = false;
308 pd->__internal_mr = mr;
310 if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
311 pd->local_dma_lkey = pd->__internal_mr->lkey;
313 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
314 pd->unsafe_global_rkey = pd->__internal_mr->rkey;
319 EXPORT_SYMBOL(__ib_alloc_pd);
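/*
 * Usage sketch (added for exposition): in-kernel users normally reach
 * __ib_alloc_pd() through the ib_alloc_pd() wrapper, which supplies
 * KBUILD_MODNAME as @caller, and release the PD with ib_dealloc_pd(); after
 * a successful allocation pd->local_dma_lkey can be used for local SGEs.
 * "device" is an assumed ib_device pointer.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	ib_dealloc_pd(pd);
 */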
322 * ib_dealloc_pd_user - Deallocates a protection domain.
323 * @pd: The protection domain to deallocate.
324 * @udata: Valid user data or NULL for kernel object
326 * It is an error to call this function while any resources in the pd still
327 * exist. The caller is responsible for synchronously destroying them and
328 * guaranteeing that no new allocations will happen.
330 void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
334 if (pd->__internal_mr) {
335 ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
337 pd->__internal_mr = NULL;
340 /* uverbs manipulates usecnt with proper locking, while the kabi
341 requires the caller to guarantee we can't race here. */
342 WARN_ON(atomic_read(&pd->usecnt));
344 rdma_restrack_del(&pd->res);
345 pd->device->ops.dealloc_pd(pd, udata);
348 EXPORT_SYMBOL(ib_dealloc_pd_user);
350 /* Address handles */
353 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
354 * @dest: Pointer to destination ah_attr. Contents of the destination
355 * pointer are assumed to be invalid and the attributes are overwritten.
356 * @src: Pointer to source ah_attr.
358 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
359 const struct rdma_ah_attr *src)
362 if (dest->grh.sgid_attr)
363 rdma_hold_gid_attr(dest->grh.sgid_attr);
365 EXPORT_SYMBOL(rdma_copy_ah_attr);
368 * rdma_replace_ah_attr - Replace valid ah_attr with a new one.
369 * @old: Pointer to existing ah_attr which needs to be replaced.
370 * old is assumed to be valid or zero'd
371 * @new: Pointer to the new ah_attr.
373 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
374 * the old ah_attr is valid; after that it copies the new attribute and holds
375 * the reference to the replaced ah_attr.
377 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
378 const struct rdma_ah_attr *new)
380 rdma_destroy_ah_attr(old);
382 if (old->grh.sgid_attr)
383 rdma_hold_gid_attr(old->grh.sgid_attr);
385 EXPORT_SYMBOL(rdma_replace_ah_attr);
388 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
389 * @dest: Pointer to destination ah_attr to copy to.
390 * dest is assumed to be valid or zero'd
391 * @src: Pointer to the new ah_attr.
393 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
394 * if it is valid. This also transfers ownership of internal references from
395 * src to dest, making src invalid in the process. No new reference of the src
398 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
400 rdma_destroy_ah_attr(dest);
402 src->grh.sgid_attr = NULL;
404 EXPORT_SYMBOL(rdma_move_ah_attr);
407 * Validate that the rdma_ah_attr is valid for the device before passing it
410 static int rdma_check_ah_attr(struct ib_device *device,
411 struct rdma_ah_attr *ah_attr)
413 if (!rdma_is_port_valid(device, ah_attr->port_num))
416 if ((rdma_is_grh_required(device, ah_attr->port_num) ||
417 ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
418 !(ah_attr->ah_flags & IB_AH_GRH))
421 if (ah_attr->grh.sgid_attr) {
423 * Make sure the passed sgid_attr is consistent with the
426 if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
427 ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
434 * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
435 * On success the caller is responsible to call rdma_unfill_sgid_attr().
437 static int rdma_fill_sgid_attr(struct ib_device *device,
438 struct rdma_ah_attr *ah_attr,
439 const struct ib_gid_attr **old_sgid_attr)
441 const struct ib_gid_attr *sgid_attr;
442 struct ib_global_route *grh;
445 *old_sgid_attr = ah_attr->grh.sgid_attr;
447 ret = rdma_check_ah_attr(device, ah_attr);
451 if (!(ah_attr->ah_flags & IB_AH_GRH))
454 grh = rdma_ah_retrieve_grh(ah_attr);
459 rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
460 if (IS_ERR(sgid_attr))
461 return PTR_ERR(sgid_attr);
463 /* Move ownership of the kref into the ah_attr */
464 grh->sgid_attr = sgid_attr;
468 static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
469 const struct ib_gid_attr *old_sgid_attr)
472 * Fill didn't change anything, the caller retains ownership of
475 if (ah_attr->grh.sgid_attr == old_sgid_attr)
479 * Otherwise, we need to undo what rdma_fill_sgid_attr so the caller
480 * doesn't see any change in the rdma_ah_attr. If we get here
481 * old_sgid_attr is NULL.
483 rdma_destroy_ah_attr(ah_attr);
486 static const struct ib_gid_attr *
487 rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
488 const struct ib_gid_attr *old_attr)
491 rdma_put_gid_attr(old_attr);
492 if (ah_attr->ah_flags & IB_AH_GRH) {
493 rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
494 return ah_attr->grh.sgid_attr;
499 static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
500 struct rdma_ah_attr *ah_attr,
502 struct ib_udata *udata)
504 struct ib_device *device = pd->device;
508 might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
510 if (!device->ops.create_ah)
511 return ERR_PTR(-EOPNOTSUPP);
513 ah = rdma_zalloc_drv_obj_gfp(
515 (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
517 return ERR_PTR(-ENOMEM);
521 ah->type = ah_attr->type;
522 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
524 ret = device->ops.create_ah(ah, ah_attr, flags, udata);
530 atomic_inc(&pd->usecnt);
535 * rdma_create_ah - Creates an address handle for the
536 * given address vector.
537 * @pd: The protection domain associated with the address handle.
538 * @ah_attr: The attributes of the address vector.
539 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
541 * It returns a newly allocated ib_ah on success and an ERR_PTR on error.
542 * The address handle is used to reference a local or global destination
543 * in all UD QP post sends.
545 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
548 const struct ib_gid_attr *old_sgid_attr;
552 ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
556 ah = _rdma_create_ah(pd, ah_attr, flags, NULL);
558 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
561 EXPORT_SYMBOL(rdma_create_ah);
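/*
 * Usage sketch (added for exposition): a kernel caller that already holds a
 * resolved rdma_ah_attr (here "ah_attr", with "pd" an existing PD) creates
 * and later destroys the handle roughly as follows:
 *
 *	struct ib_ah *ah;
 *
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */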
564 * rdma_create_user_ah - Creates an address handle for the
565 * given address vector.
566 * It resolves destination mac address for ah attribute of RoCE type.
567 * @pd: The protection domain associated with the address handle.
568 * @ah_attr: The attributes of the address vector.
569 * @udata: pointer to user's input output buffer information needed by
572 * It returns a newly allocated ib_ah on success and an ERR_PTR on error.
573 * The address handle is used to reference a local or global destination
574 * in all UD QP post sends.
576 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
577 struct rdma_ah_attr *ah_attr,
578 struct ib_udata *udata)
580 const struct ib_gid_attr *old_sgid_attr;
584 err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
588 if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
589 err = ib_resolve_eth_dmac(pd->device, ah_attr);
596 ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, udata);
599 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
602 EXPORT_SYMBOL(rdma_create_user_ah);
604 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
606 const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
607 struct iphdr ip4h_checked;
608 const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
610 /* If it's IPv6, the version must be 6, otherwise, the first
611 * 20 bytes (before the IPv4 header) are garbled.
613 if (ip6h->version != 6)
614 return (ip4h->version == 4) ? 4 : 0;
615 /* version may be 6 or 4 because the first 20 bytes could be garbled */
617 /* RoCE v2 requires no options, thus header length
624 * We can't write on scattered buffers so we need to copy to
627 memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
628 ip4h_checked.check = 0;
629 ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
630 /* if IPv4 header checksum is OK, believe it */
631 if (ip4h->check == ip4h_checked.check)
635 EXPORT_SYMBOL(ib_get_rdma_header_version);
637 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
639 const struct ib_grh *grh)
643 if (rdma_protocol_ib(device, port_num))
644 return RDMA_NETWORK_IB;
646 grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
648 if (grh_version == 4)
649 return RDMA_NETWORK_IPV4;
651 if (grh->next_hdr == IPPROTO_UDP)
652 return RDMA_NETWORK_IPV6;
654 return RDMA_NETWORK_ROCE_V1;
657 struct find_gid_index_context {
659 enum ib_gid_type gid_type;
662 static bool find_gid_index(const union ib_gid *gid,
663 const struct ib_gid_attr *gid_attr,
666 struct find_gid_index_context *ctx = context;
667 u16 vlan_id = 0xffff;
670 if (ctx->gid_type != gid_attr->gid_type)
673 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
677 return ctx->vlan_id == vlan_id;
680 static const struct ib_gid_attr *
681 get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
682 u16 vlan_id, const union ib_gid *sgid,
683 enum ib_gid_type gid_type)
685 struct find_gid_index_context context = {.vlan_id = vlan_id,
686 .gid_type = gid_type};
688 return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
692 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
693 enum rdma_network_type net_type,
694 union ib_gid *sgid, union ib_gid *dgid)
696 struct sockaddr_in src_in;
697 struct sockaddr_in dst_in;
698 __be32 src_saddr, dst_saddr;
703 if (net_type == RDMA_NETWORK_IPV4) {
704 memcpy(&src_in.sin_addr.s_addr,
705 &hdr->roce4grh.saddr, 4);
706 memcpy(&dst_in.sin_addr.s_addr,
707 &hdr->roce4grh.daddr, 4);
708 src_saddr = src_in.sin_addr.s_addr;
709 dst_saddr = dst_in.sin_addr.s_addr;
710 ipv6_addr_set_v4mapped(src_saddr,
711 (struct in6_addr *)sgid);
712 ipv6_addr_set_v4mapped(dst_saddr,
713 (struct in6_addr *)dgid);
715 } else if (net_type == RDMA_NETWORK_IPV6 ||
716 net_type == RDMA_NETWORK_IB) {
717 *dgid = hdr->ibgrh.dgid;
718 *sgid = hdr->ibgrh.sgid;
724 EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
726 /* Resolve destination mac address and hop limit for unicast destination
727 * GID entry, considering the source GID entry as well.
728 * ah_attribute must have valid port_num, sgid_index.
730 static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
731 struct rdma_ah_attr *ah_attr)
733 struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
734 const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
735 int hop_limit = 0xff;
738 /* If destination is link local and source GID is RoCEv1,
739 * IP stack is not used.
741 if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
742 sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
743 rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
748 ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
750 sgid_attr, &hop_limit);
752 grh->hop_limit = hop_limit;
757 * This function initializes address handle attributes from the incoming packet.
758 * The incoming packet has the dgid of the receiver node on which this code
759 * is getting executed and sgid contains the GID of the sender.
761 * When resolving the destination mac address, the arrived dgid is used
762 * as sgid and sgid is used as dgid because sgid contains the destination's
763 * GID to respond to.
765 * On success the caller is responsible to call rdma_destroy_ah_attr on the
768 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
769 const struct ib_wc *wc, const struct ib_grh *grh,
770 struct rdma_ah_attr *ah_attr)
774 enum rdma_network_type net_type = RDMA_NETWORK_IB;
775 enum ib_gid_type gid_type = IB_GID_TYPE_IB;
776 const struct ib_gid_attr *sgid_attr;
783 memset(ah_attr, 0, sizeof *ah_attr);
784 ah_attr->type = rdma_ah_find_type(device, port_num);
785 if (rdma_cap_eth_ah(device, port_num)) {
786 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
787 net_type = wc->network_hdr_type;
789 net_type = ib_get_net_type_by_grh(device, port_num, grh);
790 gid_type = ib_network_to_gid_type(net_type);
792 ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
797 rdma_ah_set_sl(ah_attr, wc->sl);
798 rdma_ah_set_port_num(ah_attr, port_num);
800 if (rdma_protocol_roce(device, port_num)) {
801 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
802 wc->vlan_id : 0xffff;
804 if (!(wc->wc_flags & IB_WC_GRH))
807 sgid_attr = get_sgid_attr_from_eth(device, port_num,
810 if (IS_ERR(sgid_attr))
811 return PTR_ERR(sgid_attr);
813 flow_class = be32_to_cpu(grh->version_tclass_flow);
814 rdma_move_grh_sgid_attr(ah_attr,
816 flow_class & 0xFFFFF,
818 (flow_class >> 20) & 0xFF,
821 ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
823 rdma_destroy_ah_attr(ah_attr);
827 rdma_ah_set_dlid(ah_attr, wc->slid);
828 rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
830 if ((wc->wc_flags & IB_WC_GRH) == 0)
833 if (dgid.global.interface_id !=
834 cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
835 sgid_attr = rdma_find_gid_by_port(
836 device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
838 sgid_attr = rdma_get_gid_attr(device, port_num, 0);
840 if (IS_ERR(sgid_attr))
841 return PTR_ERR(sgid_attr);
842 flow_class = be32_to_cpu(grh->version_tclass_flow);
843 rdma_move_grh_sgid_attr(ah_attr,
845 flow_class & 0xFFFFF,
847 (flow_class >> 20) & 0xFF,
853 EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
856 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
859 * @attr: Pointer to AH attribute structure
860 * @dgid: Destination GID
861 * @flow_label: Flow label
862 * @hop_limit: Hop limit
863 * @traffic_class: traffic class
864 * @sgid_attr: Pointer to SGID attribute
866 * This takes ownership of the sgid_attr reference. The caller must ensure
867 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
868 * calling this function.
870 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
871 u32 flow_label, u8 hop_limit, u8 traffic_class,
872 const struct ib_gid_attr *sgid_attr)
874 rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
876 attr->grh.sgid_attr = sgid_attr;
878 EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
881 * rdma_destroy_ah_attr - Release reference to SGID attribute of
883 * @ah_attr: Pointer to ah attribute
885 * Release reference to the SGID attribute of the ah attribute if it is
886 * non NULL. It is safe to call this multiple times, and safe to call it on
887 * a zero initialized ah_attr.
889 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
891 if (ah_attr->grh.sgid_attr) {
892 rdma_put_gid_attr(ah_attr->grh.sgid_attr);
893 ah_attr->grh.sgid_attr = NULL;
896 EXPORT_SYMBOL(rdma_destroy_ah_attr);
898 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
899 const struct ib_grh *grh, u8 port_num)
901 struct rdma_ah_attr ah_attr;
905 ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
909 ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
911 rdma_destroy_ah_attr(&ah_attr);
914 EXPORT_SYMBOL(ib_create_ah_from_wc);
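/*
 * Usage sketch (added for exposition): a UD responder usually builds the
 * reply AH straight from the received completion and GRH; "pd", "wc", "grh"
 * and "port_num" are assumed to come from the receive path, and the AH is
 * released once the reply has been posted.
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */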
916 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
918 const struct ib_gid_attr *old_sgid_attr;
921 if (ah->type != ah_attr->type)
924 ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
928 ret = ah->device->ops.modify_ah ?
929 ah->device->ops.modify_ah(ah, ah_attr) :
932 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
933 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
936 EXPORT_SYMBOL(rdma_modify_ah);
938 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
940 ah_attr->grh.sgid_attr = NULL;
942 return ah->device->ops.query_ah ?
943 ah->device->ops.query_ah(ah, ah_attr) :
946 EXPORT_SYMBOL(rdma_query_ah);
948 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
950 const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
953 might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
957 ah->device->ops.destroy_ah(ah, flags);
958 atomic_dec(&pd->usecnt);
960 rdma_put_gid_attr(sgid_attr);
965 EXPORT_SYMBOL(rdma_destroy_ah_user);
967 /* Shared receive queues */
969 struct ib_srq *ib_create_srq(struct ib_pd *pd,
970 struct ib_srq_init_attr *srq_init_attr)
975 if (!pd->device->ops.create_srq)
976 return ERR_PTR(-EOPNOTSUPP);
978 srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
980 return ERR_PTR(-ENOMEM);
982 srq->device = pd->device;
984 srq->event_handler = srq_init_attr->event_handler;
985 srq->srq_context = srq_init_attr->srq_context;
986 srq->srq_type = srq_init_attr->srq_type;
988 if (ib_srq_has_cq(srq->srq_type)) {
989 srq->ext.cq = srq_init_attr->ext.cq;
990 atomic_inc(&srq->ext.cq->usecnt);
992 if (srq->srq_type == IB_SRQT_XRC) {
993 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
994 atomic_inc(&srq->ext.xrc.xrcd->usecnt);
996 atomic_inc(&pd->usecnt);
998 ret = pd->device->ops.create_srq(srq, srq_init_attr, NULL);
1000 atomic_dec(&srq->pd->usecnt);
1001 if (srq->srq_type == IB_SRQT_XRC)
1002 atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1003 if (ib_srq_has_cq(srq->srq_type))
1004 atomic_dec(&srq->ext.cq->usecnt);
1006 return ERR_PTR(ret);
1011 EXPORT_SYMBOL(ib_create_srq);
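/*
 * Usage sketch (added for exposition): a basic (non-XRC) SRQ only needs the
 * requested queue sizes; the values below are arbitrary and "pd" is an
 * existing protection domain.
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = { .max_wr = 256, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */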
1013 int ib_modify_srq(struct ib_srq *srq,
1014 struct ib_srq_attr *srq_attr,
1015 enum ib_srq_attr_mask srq_attr_mask)
1017 return srq->device->ops.modify_srq ?
1018 srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
1019 NULL) : -EOPNOTSUPP;
1021 EXPORT_SYMBOL(ib_modify_srq);
1023 int ib_query_srq(struct ib_srq *srq,
1024 struct ib_srq_attr *srq_attr)
1026 return srq->device->ops.query_srq ?
1027 srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
1029 EXPORT_SYMBOL(ib_query_srq);
1031 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
1033 if (atomic_read(&srq->usecnt))
1036 srq->device->ops.destroy_srq(srq, udata);
1038 atomic_dec(&srq->pd->usecnt);
1039 if (srq->srq_type == IB_SRQT_XRC)
1040 atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1041 if (ib_srq_has_cq(srq->srq_type))
1042 atomic_dec(&srq->ext.cq->usecnt);
1047 EXPORT_SYMBOL(ib_destroy_srq_user);
1051 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
1053 struct ib_qp *qp = context;
1054 unsigned long flags;
1056 spin_lock_irqsave(&qp->device->event_handler_lock, flags);
1057 list_for_each_entry(event->element.qp, &qp->open_list, open_list)
1058 if (event->element.qp->event_handler)
1059 event->element.qp->event_handler(event, event->element.qp->qp_context);
1060 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
1063 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
1065 mutex_lock(&xrcd->tgt_qp_mutex);
1066 list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
1067 mutex_unlock(&xrcd->tgt_qp_mutex);
1070 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
1071 void (*event_handler)(struct ib_event *, void *),
1075 unsigned long flags;
1078 qp = kzalloc(sizeof *qp, GFP_KERNEL);
1080 return ERR_PTR(-ENOMEM);
1082 qp->real_qp = real_qp;
1083 err = ib_open_shared_qp_security(qp, real_qp->device);
1086 return ERR_PTR(err);
1089 qp->real_qp = real_qp;
1090 atomic_inc(&real_qp->usecnt);
1091 qp->device = real_qp->device;
1092 qp->event_handler = event_handler;
1093 qp->qp_context = qp_context;
1094 qp->qp_num = real_qp->qp_num;
1095 qp->qp_type = real_qp->qp_type;
1097 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
1098 list_add(&qp->open_list, &real_qp->open_list);
1099 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
1104 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1105 struct ib_qp_open_attr *qp_open_attr)
1107 struct ib_qp *qp, *real_qp;
1109 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
1110 return ERR_PTR(-EINVAL);
1112 qp = ERR_PTR(-EINVAL);
1113 mutex_lock(&xrcd->tgt_qp_mutex);
1114 list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
1115 if (real_qp->qp_num == qp_open_attr->qp_num) {
1116 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
1117 qp_open_attr->qp_context);
1121 mutex_unlock(&xrcd->tgt_qp_mutex);
1124 EXPORT_SYMBOL(ib_open_qp);
1126 static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
1127 struct ib_qp_init_attr *qp_init_attr,
1128 struct ib_udata *udata)
1130 struct ib_qp *real_qp = qp;
1132 qp->event_handler = __ib_shared_qp_event_handler;
1133 qp->qp_context = qp;
1135 qp->send_cq = qp->recv_cq = NULL;
1137 qp->xrcd = qp_init_attr->xrcd;
1138 atomic_inc(&qp_init_attr->xrcd->usecnt);
1139 INIT_LIST_HEAD(&qp->open_list);
1141 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
1142 qp_init_attr->qp_context);
1146 __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
1150 struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
1151 struct ib_qp_init_attr *qp_init_attr,
1152 struct ib_udata *udata)
1154 struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
1158 if (qp_init_attr->rwq_ind_tbl &&
1159 (qp_init_attr->recv_cq ||
1160 qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
1161 qp_init_attr->cap.max_recv_sge))
1162 return ERR_PTR(-EINVAL);
1164 if ((qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) &&
1165 !(device->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER))
1166 return ERR_PTR(-EINVAL);
1169 * If the caller is using the RDMA API, calculate the resources
1170 * needed for the RDMA READ/WRITE operations.
1172 * Note that these callers need to pass in a port number.
1174 if (qp_init_attr->cap.max_rdma_ctxs)
1175 rdma_rw_init_qp(device, qp_init_attr);
1177 qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
1181 ret = ib_create_qp_security(qp, device);
1185 qp->qp_type = qp_init_attr->qp_type;
1186 qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
1188 atomic_set(&qp->usecnt, 0);
1190 spin_lock_init(&qp->mr_lock);
1191 INIT_LIST_HEAD(&qp->rdma_mrs);
1192 INIT_LIST_HEAD(&qp->sig_mrs);
1195 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
1196 struct ib_qp *xrc_qp =
1197 create_xrc_qp_user(qp, qp_init_attr, udata);
1199 if (IS_ERR(xrc_qp)) {
1200 ret = PTR_ERR(xrc_qp);
1206 qp->event_handler = qp_init_attr->event_handler;
1207 qp->qp_context = qp_init_attr->qp_context;
1208 if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
1212 qp->recv_cq = qp_init_attr->recv_cq;
1213 if (qp_init_attr->recv_cq)
1214 atomic_inc(&qp_init_attr->recv_cq->usecnt);
1215 qp->srq = qp_init_attr->srq;
1217 atomic_inc(&qp_init_attr->srq->usecnt);
1220 qp->send_cq = qp_init_attr->send_cq;
1223 atomic_inc(&pd->usecnt);
1224 if (qp_init_attr->send_cq)
1225 atomic_inc(&qp_init_attr->send_cq->usecnt);
1226 if (qp_init_attr->rwq_ind_tbl)
1227 atomic_inc(&qp->rwq_ind_tbl->usecnt);
1229 if (qp_init_attr->cap.max_rdma_ctxs) {
1230 ret = rdma_rw_init_mrs(qp, qp_init_attr);
1236 * Note: all hw drivers guarantee that max_send_sge is lower than
1237 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
1238 * max_send_sge <= max_sge_rd.
1240 qp->max_write_sge = qp_init_attr->cap.max_send_sge;
1241 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
1242 device->attrs.max_sge_rd);
1243 if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
1244 qp->integrity_en = true;
1250 return ERR_PTR(ret);
1253 EXPORT_SYMBOL(ib_create_qp_user);
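/*
 * Usage sketch (added for exposition): kernel users normally call the
 * ib_create_qp() wrapper rather than this function directly. A minimal RC
 * QP setup might look like the following; "pd" and "cq" are assumed to
 * already exist and the capability values are arbitrary.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */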
1255 static const struct {
1257 enum ib_qp_attr_mask req_param[IB_QPT_MAX];
1258 enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
1259 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
1261 [IB_QPS_RESET] = { .valid = 1 },
1265 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1268 [IB_QPT_RAW_PACKET] = IB_QP_PORT,
1269 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
1271 IB_QP_ACCESS_FLAGS),
1272 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
1274 IB_QP_ACCESS_FLAGS),
1275 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
1277 IB_QP_ACCESS_FLAGS),
1278 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
1280 IB_QP_ACCESS_FLAGS),
1281 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1283 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1289 [IB_QPS_RESET] = { .valid = 1 },
1290 [IB_QPS_ERR] = { .valid = 1 },
1294 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1297 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
1299 IB_QP_ACCESS_FLAGS),
1300 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
1302 IB_QP_ACCESS_FLAGS),
1303 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
1305 IB_QP_ACCESS_FLAGS),
1306 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
1308 IB_QP_ACCESS_FLAGS),
1309 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1311 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1318 [IB_QPT_UC] = (IB_QP_AV |
1322 [IB_QPT_RC] = (IB_QP_AV |
1326 IB_QP_MAX_DEST_RD_ATOMIC |
1327 IB_QP_MIN_RNR_TIMER),
1328 [IB_QPT_XRC_INI] = (IB_QP_AV |
1332 [IB_QPT_XRC_TGT] = (IB_QP_AV |
1336 IB_QP_MAX_DEST_RD_ATOMIC |
1337 IB_QP_MIN_RNR_TIMER),
1340 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1342 [IB_QPT_UC] = (IB_QP_ALT_PATH |
1343 IB_QP_ACCESS_FLAGS |
1345 [IB_QPT_RC] = (IB_QP_ALT_PATH |
1346 IB_QP_ACCESS_FLAGS |
1348 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
1349 IB_QP_ACCESS_FLAGS |
1351 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
1352 IB_QP_ACCESS_FLAGS |
1354 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1356 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1362 [IB_QPS_RESET] = { .valid = 1 },
1363 [IB_QPS_ERR] = { .valid = 1 },
1367 [IB_QPT_UD] = IB_QP_SQ_PSN,
1368 [IB_QPT_UC] = IB_QP_SQ_PSN,
1369 [IB_QPT_RC] = (IB_QP_TIMEOUT |
1373 IB_QP_MAX_QP_RD_ATOMIC),
1374 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
1378 IB_QP_MAX_QP_RD_ATOMIC),
1379 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
1381 [IB_QPT_SMI] = IB_QP_SQ_PSN,
1382 [IB_QPT_GSI] = IB_QP_SQ_PSN,
1385 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1387 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1389 IB_QP_ACCESS_FLAGS |
1390 IB_QP_PATH_MIG_STATE),
1391 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1393 IB_QP_ACCESS_FLAGS |
1394 IB_QP_MIN_RNR_TIMER |
1395 IB_QP_PATH_MIG_STATE),
1396 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1398 IB_QP_ACCESS_FLAGS |
1399 IB_QP_PATH_MIG_STATE),
1400 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1402 IB_QP_ACCESS_FLAGS |
1403 IB_QP_MIN_RNR_TIMER |
1404 IB_QP_PATH_MIG_STATE),
1405 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1407 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1409 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1414 [IB_QPS_RESET] = { .valid = 1 },
1415 [IB_QPS_ERR] = { .valid = 1 },
1419 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1421 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1422 IB_QP_ACCESS_FLAGS |
1424 IB_QP_PATH_MIG_STATE),
1425 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1426 IB_QP_ACCESS_FLAGS |
1428 IB_QP_PATH_MIG_STATE |
1429 IB_QP_MIN_RNR_TIMER),
1430 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1431 IB_QP_ACCESS_FLAGS |
1433 IB_QP_PATH_MIG_STATE),
1434 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1435 IB_QP_ACCESS_FLAGS |
1437 IB_QP_PATH_MIG_STATE |
1438 IB_QP_MIN_RNR_TIMER),
1439 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1441 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1443 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1449 [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1450 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1451 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1452 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1453 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
1454 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1455 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1460 [IB_QPS_RESET] = { .valid = 1 },
1461 [IB_QPS_ERR] = { .valid = 1 },
1465 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1467 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1469 IB_QP_ACCESS_FLAGS |
1470 IB_QP_PATH_MIG_STATE),
1471 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1473 IB_QP_ACCESS_FLAGS |
1474 IB_QP_MIN_RNR_TIMER |
1475 IB_QP_PATH_MIG_STATE),
1476 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1478 IB_QP_ACCESS_FLAGS |
1479 IB_QP_PATH_MIG_STATE),
1480 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1482 IB_QP_ACCESS_FLAGS |
1483 IB_QP_MIN_RNR_TIMER |
1484 IB_QP_PATH_MIG_STATE),
1485 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1487 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1494 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1496 [IB_QPT_UC] = (IB_QP_AV |
1498 IB_QP_ACCESS_FLAGS |
1500 IB_QP_PATH_MIG_STATE),
1501 [IB_QPT_RC] = (IB_QP_PORT |
1506 IB_QP_MAX_QP_RD_ATOMIC |
1507 IB_QP_MAX_DEST_RD_ATOMIC |
1509 IB_QP_ACCESS_FLAGS |
1511 IB_QP_MIN_RNR_TIMER |
1512 IB_QP_PATH_MIG_STATE),
1513 [IB_QPT_XRC_INI] = (IB_QP_PORT |
1518 IB_QP_MAX_QP_RD_ATOMIC |
1520 IB_QP_ACCESS_FLAGS |
1522 IB_QP_PATH_MIG_STATE),
1523 [IB_QPT_XRC_TGT] = (IB_QP_PORT |
1526 IB_QP_MAX_DEST_RD_ATOMIC |
1528 IB_QP_ACCESS_FLAGS |
1530 IB_QP_MIN_RNR_TIMER |
1531 IB_QP_PATH_MIG_STATE),
1532 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1534 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1540 [IB_QPS_RESET] = { .valid = 1 },
1541 [IB_QPS_ERR] = { .valid = 1 },
1545 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1547 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1548 IB_QP_ACCESS_FLAGS),
1549 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1551 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1557 [IB_QPS_RESET] = { .valid = 1 },
1558 [IB_QPS_ERR] = { .valid = 1 }
1562 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1563 enum ib_qp_type type, enum ib_qp_attr_mask mask)
1565 enum ib_qp_attr_mask req_param, opt_param;
1567 if (mask & IB_QP_CUR_STATE &&
1568 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1569 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1572 if (!qp_state_table[cur_state][next_state].valid)
1575 req_param = qp_state_table[cur_state][next_state].req_param[type];
1576 opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1578 if ((mask & req_param) != req_param)
1581 if (mask & ~(req_param | opt_param | IB_QP_STATE))
1586 EXPORT_SYMBOL(ib_modify_qp_is_ok);
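/*
 * Usage sketch (added for exposition): drivers validate a requested
 * transition against qp_state_table before applying it; e.g. a RESET->INIT
 * transition of an RC QP must carry at least the state, P_Key index, port
 * and access flags:
 *
 *	if (!ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
 *				IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				IB_QP_PORT | IB_QP_ACCESS_FLAGS))
 *		return -EINVAL;
 */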
1589 * ib_resolve_eth_dmac - Resolve destination mac address
1590 * @device: Device to consider
1591 * @ah_attr: address handle attribute which describes the
1592 * source and destination parameters
1593 * ib_resolve_eth_dmac() resolves the destination mac address and L3 hop limit.
1594 * It returns 0 on success or an appropriate error code. It initializes the
1595 * necessary ah_attr fields when the call is successful.
1597 static int ib_resolve_eth_dmac(struct ib_device *device,
1598 struct rdma_ah_attr *ah_attr)
1602 if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1603 if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1606 memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
1607 ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
1609 ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
1610 (char *)ah_attr->roce.dmac);
1613 ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1618 static bool is_qp_type_connected(const struct ib_qp *qp)
1620 return (qp->qp_type == IB_QPT_UC ||
1621 qp->qp_type == IB_QPT_RC ||
1622 qp->qp_type == IB_QPT_XRC_INI ||
1623 qp->qp_type == IB_QPT_XRC_TGT);
1627 * IB core internal function to perform QP attributes modification.
1629 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1630 int attr_mask, struct ib_udata *udata)
1632 u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1633 const struct ib_gid_attr *old_sgid_attr_av;
1634 const struct ib_gid_attr *old_sgid_attr_alt_av;
1637 if (attr_mask & IB_QP_AV) {
1638 ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
1643 if (attr_mask & IB_QP_ALT_PATH) {
1645 * FIXME: This does not track the migration state, so if the
1646 * user loads a new alternate path after the HW has migrated
1647 * from primary->alternate we will keep the wrong
1648 * references. This is OK for IB because the reference
1649 * counting does not serve any functional purpose.
1651 ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
1652 &old_sgid_attr_alt_av);
1657 * Today the core code can only handle alternate paths and APM
1658 * for IB. Ban them in roce mode.
1660 if (!(rdma_protocol_ib(qp->device,
1661 attr->alt_ah_attr.port_num) &&
1662 rdma_protocol_ib(qp->device, port))) {
1669 * If the user provided the qp_attr then we have to resolve it. Kernel
1670 * users have to provide already resolved rdma_ah_attr's
1672 if (udata && (attr_mask & IB_QP_AV) &&
1673 attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
1674 is_qp_type_connected(qp)) {
1675 ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
1680 if (rdma_ib_or_roce(qp->device, port)) {
1681 if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
1682 dev_warn(&qp->device->dev,
1683 "%s rq_psn overflow, masking to 24 bits\n",
1685 attr->rq_psn &= 0xffffff;
1688 if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
1689 dev_warn(&qp->device->dev,
1690 " %s sq_psn overflow, masking to 24 bits\n",
1692 attr->sq_psn &= 0xffffff;
1697 * Bind this qp to a counter automatically based on the rdma counter
1698 * rules. This is only set in RST2INIT with the port specified.
1700 if (!qp->counter && (attr_mask & IB_QP_PORT) &&
1701 ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
1702 rdma_counter_bind_qp_auto(qp, attr->port_num);
1704 ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1708 if (attr_mask & IB_QP_PORT)
1709 qp->port = attr->port_num;
1710 if (attr_mask & IB_QP_AV)
1712 rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
1713 if (attr_mask & IB_QP_ALT_PATH)
1714 qp->alt_path_sgid_attr = rdma_update_sgid_attr(
1715 &attr->alt_ah_attr, qp->alt_path_sgid_attr);
1718 if (attr_mask & IB_QP_ALT_PATH)
1719 rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
1721 if (attr_mask & IB_QP_AV)
1722 rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
1727 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1728 * @ib_qp: The QP to modify.
1729 * @attr: On input, specifies the QP attributes to modify. On output,
1730 * the current values of selected QP attributes are returned.
1731 * @attr_mask: A bit-mask used to specify which attributes of the QP
1732 * are being modified.
1733 * @udata: pointer to user's input output buffer information
1735 * It returns 0 on success and returns appropriate error code on error.
1737 int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1738 int attr_mask, struct ib_udata *udata)
1740 return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
1742 EXPORT_SYMBOL(ib_modify_qp_with_udata);
1744 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
1748 struct net_device *netdev;
1749 struct ethtool_link_ksettings lksettings;
1751 if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1754 netdev = ib_device_get_netdev(dev, port_num);
1759 rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1765 netdev_speed = lksettings.base.speed;
1767 netdev_speed = SPEED_1000;
1768 pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
1772 if (netdev_speed <= SPEED_1000) {
1773 *width = IB_WIDTH_1X;
1774 *speed = IB_SPEED_SDR;
1775 } else if (netdev_speed <= SPEED_10000) {
1776 *width = IB_WIDTH_1X;
1777 *speed = IB_SPEED_FDR10;
1778 } else if (netdev_speed <= SPEED_20000) {
1779 *width = IB_WIDTH_4X;
1780 *speed = IB_SPEED_DDR;
1781 } else if (netdev_speed <= SPEED_25000) {
1782 *width = IB_WIDTH_1X;
1783 *speed = IB_SPEED_EDR;
1784 } else if (netdev_speed <= SPEED_40000) {
1785 *width = IB_WIDTH_4X;
1786 *speed = IB_SPEED_FDR10;
1788 *width = IB_WIDTH_4X;
1789 *speed = IB_SPEED_EDR;
1794 EXPORT_SYMBOL(ib_get_eth_speed);
1796 int ib_modify_qp(struct ib_qp *qp,
1797 struct ib_qp_attr *qp_attr,
1800 return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
1802 EXPORT_SYMBOL(ib_modify_qp);
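/*
 * Usage sketch (added for exposition): a kernel user drives the usual
 * RESET->INIT->RTR->RTS sequence by filling ib_qp_attr and a matching
 * attr_mask for each step. The INIT step of an RC QP might look like this
 * (port and access flags are example values):
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */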
1804 int ib_query_qp(struct ib_qp *qp,
1805 struct ib_qp_attr *qp_attr,
1807 struct ib_qp_init_attr *qp_init_attr)
1809 qp_attr->ah_attr.grh.sgid_attr = NULL;
1810 qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
1812 return qp->device->ops.query_qp ?
1813 qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
1814 qp_init_attr) : -EOPNOTSUPP;
1816 EXPORT_SYMBOL(ib_query_qp);
1818 int ib_close_qp(struct ib_qp *qp)
1820 struct ib_qp *real_qp;
1821 unsigned long flags;
1823 real_qp = qp->real_qp;
1827 spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
1828 list_del(&qp->open_list);
1829 spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
1831 atomic_dec(&real_qp->usecnt);
1833 ib_close_shared_qp_security(qp->qp_sec);
1838 EXPORT_SYMBOL(ib_close_qp);
1840 static int __ib_destroy_shared_qp(struct ib_qp *qp)
1842 struct ib_xrcd *xrcd;
1843 struct ib_qp *real_qp;
1846 real_qp = qp->real_qp;
1847 xrcd = real_qp->xrcd;
1849 mutex_lock(&xrcd->tgt_qp_mutex);
1851 if (atomic_read(&real_qp->usecnt) == 0)
1852 list_del(&real_qp->xrcd_list);
1855 mutex_unlock(&xrcd->tgt_qp_mutex);
1858 ret = ib_destroy_qp(real_qp);
1860 atomic_dec(&xrcd->usecnt);
1862 __ib_insert_xrcd_qp(xrcd, real_qp);
1868 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
1870 const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
1871 const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
1873 struct ib_cq *scq, *rcq;
1875 struct ib_rwq_ind_table *ind_tbl;
1876 struct ib_qp_security *sec;
1879 WARN_ON_ONCE(qp->mrs_used > 0);
1881 if (atomic_read(&qp->usecnt))
1884 if (qp->real_qp != qp)
1885 return __ib_destroy_shared_qp(qp);
1891 ind_tbl = qp->rwq_ind_tbl;
1894 ib_destroy_qp_security_begin(sec);
1897 rdma_rw_cleanup_mrs(qp);
1899 rdma_counter_unbind_qp(qp, true);
1900 rdma_restrack_del(&qp->res);
1901 ret = qp->device->ops.destroy_qp(qp, udata);
1903 if (alt_path_sgid_attr)
1904 rdma_put_gid_attr(alt_path_sgid_attr);
1906 rdma_put_gid_attr(av_sgid_attr);
1908 atomic_dec(&pd->usecnt);
1910 atomic_dec(&scq->usecnt);
1912 atomic_dec(&rcq->usecnt);
1914 atomic_dec(&srq->usecnt);
1916 atomic_dec(&ind_tbl->usecnt);
1918 ib_destroy_qp_security_end(sec);
1921 ib_destroy_qp_security_abort(sec);
1926 EXPORT_SYMBOL(ib_destroy_qp_user);
1928 /* Completion queues */
1930 struct ib_cq *__ib_create_cq(struct ib_device *device,
1931 ib_comp_handler comp_handler,
1932 void (*event_handler)(struct ib_event *, void *),
1934 const struct ib_cq_init_attr *cq_attr,
1940 cq = rdma_zalloc_drv_obj(device, ib_cq);
1942 return ERR_PTR(-ENOMEM);
1944 cq->device = device;
1946 cq->comp_handler = comp_handler;
1947 cq->event_handler = event_handler;
1948 cq->cq_context = cq_context;
1949 atomic_set(&cq->usecnt, 0);
1950 cq->res.type = RDMA_RESTRACK_CQ;
1951 rdma_restrack_set_task(&cq->res, caller);
1953 ret = device->ops.create_cq(cq, cq_attr, NULL);
1956 return ERR_PTR(ret);
1959 rdma_restrack_kadd(&cq->res);
1962 EXPORT_SYMBOL(__ib_create_cq);
1964 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1966 return cq->device->ops.modify_cq ?
1967 cq->device->ops.modify_cq(cq, cq_count,
1968 cq_period) : -EOPNOTSUPP;
1970 EXPORT_SYMBOL(rdma_set_cq_moderation);
1972 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
1974 if (atomic_read(&cq->usecnt))
1977 rdma_restrack_del(&cq->res);
1978 cq->device->ops.destroy_cq(cq, udata);
1982 EXPORT_SYMBOL(ib_destroy_cq_user);
1984 int ib_resize_cq(struct ib_cq *cq, int cqe)
1986 return cq->device->ops.resize_cq ?
1987 cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
1989 EXPORT_SYMBOL(ib_resize_cq);
1991 /* Memory regions */
1993 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
1995 struct ib_pd *pd = mr->pd;
1996 struct ib_dm *dm = mr->dm;
1997 struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
2000 rdma_restrack_del(&mr->res);
2001 ret = mr->device->ops.dereg_mr(mr, udata);
2003 atomic_dec(&pd->usecnt);
2005 atomic_dec(&dm->usecnt);
2011 EXPORT_SYMBOL(ib_dereg_mr_user);
2014 * ib_alloc_mr_user() - Allocates a memory region
2015 * @pd: protection domain associated with the region
2016 * @mr_type: memory region type
2017 * @max_num_sg: maximum sg entries available for registration.
2018 * @udata: user data or null for kernel objects
2021 * Memory registration page/sg lists must not exceed max_num_sg.
2022 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
2023 * max_num_sg * used_page_size.
2026 struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
2027 u32 max_num_sg, struct ib_udata *udata)
2031 if (!pd->device->ops.alloc_mr)
2032 return ERR_PTR(-EOPNOTSUPP);
2034 if (WARN_ON_ONCE(mr_type == IB_MR_TYPE_INTEGRITY))
2035 return ERR_PTR(-EINVAL);
2037 mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg, udata);
2039 mr->device = pd->device;
2043 atomic_inc(&pd->usecnt);
2044 mr->need_inval = false;
2045 mr->res.type = RDMA_RESTRACK_MR;
2046 rdma_restrack_kadd(&mr->res);
2048 mr->sig_attrs = NULL;
2053 EXPORT_SYMBOL(ib_alloc_mr_user);
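/*
 * Usage sketch (added for exposition): fast-registration users typically go
 * through the ib_alloc_mr() wrapper and then map a DMA-mapped scatterlist
 * into the MR with ib_map_mr_sg() before posting an IB_WR_REG_MR work
 * request; "pd", "sgl" and "sg_nents" are assumed to exist.
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	int n;
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	n = ib_map_mr_sg(mr, sgl, sg_nents, NULL, PAGE_SIZE);
 */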
2056 * ib_alloc_mr_integrity() - Allocates an integrity memory region
2057 * @pd: protection domain associated with the region
2058 * @max_num_data_sg: maximum data sg entries available for registration
2059 * @max_num_meta_sg: maximum metadata sg entries available for
2063 * Memory registration page/sg lists must not exceed max_num_sg,
2064 * also the integrity page/sg lists must not exceed max_num_meta_sg.
2067 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
2068 u32 max_num_data_sg,
2069 u32 max_num_meta_sg)
2072 struct ib_sig_attrs *sig_attrs;
2074 if (!pd->device->ops.alloc_mr_integrity ||
2075 !pd->device->ops.map_mr_sg_pi)
2076 return ERR_PTR(-EOPNOTSUPP);
2078 if (!max_num_meta_sg)
2079 return ERR_PTR(-EINVAL);
2081 sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
2083 return ERR_PTR(-ENOMEM);
2085 mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
2092 mr->device = pd->device;
2096 atomic_inc(&pd->usecnt);
2097 mr->need_inval = false;
2098 mr->res.type = RDMA_RESTRACK_MR;
2099 rdma_restrack_kadd(&mr->res);
2100 mr->type = IB_MR_TYPE_INTEGRITY;
2101 mr->sig_attrs = sig_attrs;
2105 EXPORT_SYMBOL(ib_alloc_mr_integrity);
2107 /* "Fast" memory regions */
2109 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
2110 int mr_access_flags,
2111 struct ib_fmr_attr *fmr_attr)
2115 if (!pd->device->ops.alloc_fmr)
2116 return ERR_PTR(-EOPNOTSUPP);
2118 fmr = pd->device->ops.alloc_fmr(pd, mr_access_flags, fmr_attr);
2120 fmr->device = pd->device;
2122 atomic_inc(&pd->usecnt);
2127 EXPORT_SYMBOL(ib_alloc_fmr);
2129 int ib_unmap_fmr(struct list_head *fmr_list)
2133 if (list_empty(fmr_list))
2136 fmr = list_entry(fmr_list->next, struct ib_fmr, list);
2137 return fmr->device->ops.unmap_fmr(fmr_list);
2139 EXPORT_SYMBOL(ib_unmap_fmr);
2141 int ib_dealloc_fmr(struct ib_fmr *fmr)
2147 ret = fmr->device->ops.dealloc_fmr(fmr);
2149 atomic_dec(&pd->usecnt);
2153 EXPORT_SYMBOL(ib_dealloc_fmr);
2155 /* Multicast groups */
2157 static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
2159 struct ib_qp_init_attr init_attr = {};
2160 struct ib_qp_attr attr = {};
2161 int num_eth_ports = 0;
2164 /* If QP state >= init, it is assigned to a port and we can check this
2167 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
2168 if (attr.qp_state >= IB_QPS_INIT) {
2169 if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
2170 IB_LINK_LAYER_INFINIBAND)
2176 /* Can't get a quick answer, iterate over all ports */
2177 for (port = 0; port < qp->device->phys_port_cnt; port++)
2178 if (rdma_port_get_link_layer(qp->device, port) !=
2179 IB_LINK_LAYER_INFINIBAND)
2182 /* If we have at least one Ethernet port, RoCE annex declares that
2183 * multicast LID should be ignored. We can't tell at this step if the
2184 * QP belongs to an IB or Ethernet port.
2189 /* If all the ports are IB, we can check according to IB spec. */
2191 return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
2192 lid == be16_to_cpu(IB_LID_PERMISSIVE));
2195 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2199 if (!qp->device->ops.attach_mcast)
2202 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2203 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2206 ret = qp->device->ops.attach_mcast(qp, gid, lid);
2208 atomic_inc(&qp->usecnt);
2211 EXPORT_SYMBOL(ib_attach_mcast);
2213 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2217 if (!qp->device->ops.detach_mcast)
2220 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2221 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2224 ret = qp->device->ops.detach_mcast(qp, gid, lid);
2226 atomic_dec(&qp->usecnt);
2229 EXPORT_SYMBOL(ib_detach_mcast);
2231 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
2233 struct ib_xrcd *xrcd;
2235 if (!device->ops.alloc_xrcd)
2236 return ERR_PTR(-EOPNOTSUPP);
2238 xrcd = device->ops.alloc_xrcd(device, NULL);
2239 if (!IS_ERR(xrcd)) {
2240 xrcd->device = device;
2242 atomic_set(&xrcd->usecnt, 0);
2243 mutex_init(&xrcd->tgt_qp_mutex);
2244 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
2249 EXPORT_SYMBOL(__ib_alloc_xrcd);
2251 int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
2256 if (atomic_read(&xrcd->usecnt))
2259 while (!list_empty(&xrcd->tgt_qp_list)) {
2260 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
2261 ret = ib_destroy_qp(qp);
2265 mutex_destroy(&xrcd->tgt_qp_mutex);
2267 return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
2269 EXPORT_SYMBOL(ib_dealloc_xrcd);
2272 * ib_create_wq - Creates a WQ associated with the specified protection
2274 * @pd: The protection domain associated with the WQ.
2275 * @wq_attr: A list of initial attributes required to create the
2276 * WQ. If WQ creation succeeds, then the attributes are updated to
2277 * the actual capabilities of the created WQ.
2279 * wq_attr->max_wr and wq_attr->max_sge determine
2280 * the requested size of the WQ, and are set to the actual values allocated
2282 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
2283 * at least as large as the requested values.
2285 struct ib_wq *ib_create_wq(struct ib_pd *pd,
2286 struct ib_wq_init_attr *wq_attr)
2290 if (!pd->device->ops.create_wq)
2291 return ERR_PTR(-EOPNOTSUPP);
2293 wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
2295 wq->event_handler = wq_attr->event_handler;
2296 wq->wq_context = wq_attr->wq_context;
2297 wq->wq_type = wq_attr->wq_type;
2298 wq->cq = wq_attr->cq;
2299 wq->device = pd->device;
2302 atomic_inc(&pd->usecnt);
2303 atomic_inc(&wq_attr->cq->usecnt);
2304 atomic_set(&wq->usecnt, 0);
2308 EXPORT_SYMBOL(ib_create_wq);
2311 * ib_destroy_wq - Destroys the specified user WQ.
2312 * @wq: The WQ to destroy.
2313 * @udata: Valid user data
2315 int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
2317 struct ib_cq *cq = wq->cq;
2318 struct ib_pd *pd = wq->pd;
2320 if (atomic_read(&wq->usecnt))
2323 wq->device->ops.destroy_wq(wq, udata);
2324 atomic_dec(&pd->usecnt);
2325 atomic_dec(&cq->usecnt);
2329 EXPORT_SYMBOL(ib_destroy_wq);
2332 * ib_modify_wq - Modifies the specified WQ.
2333 * @wq: The WQ to modify.
2334 * @wq_attr: On input, specifies the WQ attributes to modify.
2335 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
2336 * are being modified.
2337 * On output, the current values of selected WQ attributes are returned.
2339 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
2344 if (!wq->device->ops.modify_wq)
2347 err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
2350 EXPORT_SYMBOL(ib_modify_wq);
2353 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
2354 * @device: The device on which to create the rwq indirection table.
2355 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
2356 * create the Indirection Table.
2358 * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must not be less
2359 * than that of the created ib_rwq_ind_table object, and the caller is responsible
2360 * for its memory allocation/free.
2362 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
2363 struct ib_rwq_ind_table_init_attr *init_attr)
2365 struct ib_rwq_ind_table *rwq_ind_table;
2369 if (!device->ops.create_rwq_ind_table)
2370 return ERR_PTR(-EOPNOTSUPP);
2372 table_size = (1 << init_attr->log_ind_tbl_size);
2373 rwq_ind_table = device->ops.create_rwq_ind_table(device,
2375 if (IS_ERR(rwq_ind_table))
2376 return rwq_ind_table;
2378 rwq_ind_table->ind_tbl = init_attr->ind_tbl;
2379 rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
2380 rwq_ind_table->device = device;
2381 rwq_ind_table->uobject = NULL;
2382 atomic_set(&rwq_ind_table->usecnt, 0);
2384 for (i = 0; i < table_size; i++)
2385 atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
2387 return rwq_ind_table;
2389 EXPORT_SYMBOL(ib_create_rwq_ind_table);
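/*
 * Illustrative sketch only: building an RSS indirection table over an array
 * of already-created WQs. As noted above, the wqs array must remain allocated
 * for the lifetime of the returned table. example_create_ind_table and the
 * log_size parameter are hypothetical.
 */
static struct ib_rwq_ind_table *
example_create_ind_table(struct ib_device *device, struct ib_wq **wqs,
			 u32 log_size)
{
	struct ib_rwq_ind_table_init_attr init_attr = {
		.log_ind_tbl_size = log_size,	/* table spans 1 << log_size WQs */
		.ind_tbl = wqs,
	};

	return ib_create_rwq_ind_table(device, &init_attr);
}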
2392 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
2393 * @wq_ind_table: The Indirection Table to destroy.
2395 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
2398 u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
2399 struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
2401 if (atomic_read(&rwq_ind_table->usecnt))
2404 err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
2406 for (i = 0; i < table_size; i++)
2407 atomic_dec(&ind_tbl[i]->usecnt);
2412 EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
2414 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2415 struct ib_mr_status *mr_status)
2417 if (!mr->device->ops.check_mr_status)
2420 return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
2422 EXPORT_SYMBOL(ib_check_mr_status);
2424 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2427 if (!device->ops.set_vf_link_state)
2430 return device->ops.set_vf_link_state(device, vf, port, state);
2432 EXPORT_SYMBOL(ib_set_vf_link_state);
2434 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2435 struct ifla_vf_info *info)
2437 if (!device->ops.get_vf_config)
2440 return device->ops.get_vf_config(device, vf, port, info);
2442 EXPORT_SYMBOL(ib_get_vf_config);
2444 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2445 struct ifla_vf_stats *stats)
2447 if (!device->ops.get_vf_stats)
2450 return device->ops.get_vf_stats(device, vf, port, stats);
2452 EXPORT_SYMBOL(ib_get_vf_stats);
2454 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2457 if (!device->ops.set_vf_guid)
2460 return device->ops.set_vf_guid(device, vf, port, guid, type);
2462 EXPORT_SYMBOL(ib_set_vf_guid);
2464 int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
2465 struct ifla_vf_guid *node_guid,
2466 struct ifla_vf_guid *port_guid)
2468 if (!device->ops.get_vf_guid)
2471 return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
2473 EXPORT_SYMBOL(ib_get_vf_guid);
2475 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
2476 * information) and set an appropriate memory region for registration.
2477 * @mr: memory region
2478 * @data_sg: dma mapped scatterlist for data
2479 * @data_sg_nents: number of entries in data_sg
2480 * @data_sg_offset: offset in bytes into data_sg
2481 * @meta_sg: dma mapped scatterlist for metadata
2482 * @meta_sg_nents: number of entries in meta_sg
2483 * @meta_sg_offset: offset in bytes into meta_sg
2484 * @page_size: page vector desired page size
2487 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
2489 * Return: 0 on success.
2491 * After this completes successfully, the memory region
2492 * is ready for registration.
2494 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
2495 int data_sg_nents, unsigned int *data_sg_offset,
2496 struct scatterlist *meta_sg, int meta_sg_nents,
2497 unsigned int *meta_sg_offset, unsigned int page_size)
2499 if (unlikely(!mr->device->ops.map_mr_sg_pi ||
2500 WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
2503 mr->page_size = page_size;
2505 return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
2506 data_sg_offset, meta_sg,
2507 meta_sg_nents, meta_sg_offset);
2509 EXPORT_SYMBOL(ib_map_mr_sg_pi);
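/*
 * Illustrative sketch only: mapping separate data and metadata scatterlists
 * onto an integrity MR. The MR is assumed to come from
 * ib_alloc_mr_integrity() (IB_MR_TYPE_INTEGRITY) and both scatterlists are
 * assumed to be DMA mapped already. example_map_pi_mr is hypothetical.
 */
static int example_map_pi_mr(struct ib_mr *mr,
			     struct scatterlist *data_sg, int data_nents,
			     struct scatterlist *meta_sg, int meta_nents)
{
	int ret;

	ret = ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
			      meta_sg, meta_nents, NULL, PAGE_SIZE);
	if (ret)
		return ret;

	/* The MR is now ready to be registered with IB_WR_REG_MR_INTEGRITY. */
	return 0;
}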
2512 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
2513 * and set it on the memory region.
2514 * @mr: memory region
2515 * @sg: dma mapped scatterlist
2516 * @sg_nents: number of entries in sg
2517 * @sg_offset: offset in bytes into sg
2518 * @page_size: page vector desired page size
2521 * - The first sg element is allowed to have an offset.
2522 * - Each sg element must either be aligned to page_size or virtually
2523 * contiguous to the previous element. In case an sg element has a
2524 * non-contiguous offset, the mapping prefix will not include it.
2525 * - The last sg element is allowed to have length less than page_size.
2526 * - If sg_nents total byte length exceeds the MR's max_num_sg * page_size
2527 * then only max_num_sg entries will be mapped.
2528 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2529 * constraints hold and the page_size argument is ignored.
2531 * Returns the number of sg elements that were mapped to the memory region.
2533 * After this completes successfully, the memory region
2534 * is ready for registration.
2536 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2537 unsigned int *sg_offset, unsigned int page_size)
2539 if (unlikely(!mr->device->ops.map_mr_sg))
2542 mr->page_size = page_size;
2544 return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
2546 EXPORT_SYMBOL(ib_map_mr_sg);
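/*
 * Illustrative sketch only: the usual fast-registration pattern built on
 * ib_map_mr_sg(). The MR is assumed to come from
 * ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, ...) and sg is assumed to be DMA
 * mapped. example_fast_reg_mr and the chosen access flags are hypothetical.
 */
static int example_fast_reg_mr(struct ib_qp *qp, struct ib_mr *mr,
			       struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr;
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;	/* whole list was not mapped */

	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}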
2549 * ib_sg_to_pages() - Convert the largest prefix of a sg list
2550 * to a page vector
2551 * @mr: memory region
2552 * @sgl: dma mapped scatterlist
2553 * @sg_nents: number of entries in sg
2554 * @sg_offset_p: IN: start offset in bytes into sg
2555 * OUT: offset in bytes for element n of the sg of the first
2556 * byte that has not been processed where n is the return
2557 * value of this function.
2558 * @set_page: driver page assignment function pointer
2560 * Core service helper for drivers to convert the largest
2561 * prefix of the given sg list to a page vector. The sg list
2562 * prefix converted is the prefix that meets the requirements
2563 * of ib_map_mr_sg.
2565 * Returns the number of sg elements that were assigned to
2566 * a page vector.
2568 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
2569 unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
2571 struct scatterlist *sg;
2572 u64 last_end_dma_addr = 0;
2573 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2574 unsigned int last_page_off = 0;
2575 u64 page_mask = ~((u64)mr->page_size - 1);
2578 if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2581 mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
2584 for_each_sg(sgl, sg, sg_nents, i) {
2585 u64 dma_addr = sg_dma_address(sg) + sg_offset;
2586 u64 prev_addr = dma_addr;
2587 unsigned int dma_len = sg_dma_len(sg) - sg_offset;
2588 u64 end_dma_addr = dma_addr + dma_len;
2589 u64 page_addr = dma_addr & page_mask;
2592 * For the second and later elements, check whether either the
2593 * end of element i-1 or the start of element i is not aligned
2594 * on a page boundary.
2596 if (i && (last_page_off != 0 || page_addr != dma_addr)) {
2597 /* Stop mapping if there is a gap. */
2598 if (last_end_dma_addr != dma_addr)
2602 * Coalesce this element with the last. If it is small
2603 * enough just update mr->length. Otherwise start
2604 * mapping from the next page.
2610 ret = set_page(mr, page_addr);
2611 if (unlikely(ret < 0)) {
2612 sg_offset = prev_addr - sg_dma_address(sg);
2613 mr->length += prev_addr - dma_addr;
2615 *sg_offset_p = sg_offset;
2616 return i || sg_offset ? i : ret;
2618 prev_addr = page_addr;
2620 page_addr += mr->page_size;
2621 } while (page_addr < end_dma_addr);
2623 mr->length += dma_len;
2624 last_end_dma_addr = end_dma_addr;
2625 last_page_off = end_dma_addr & ~page_mask;
2634 EXPORT_SYMBOL(ib_sg_to_pages);
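/*
 * Illustrative sketch only: how a low-level driver might implement its
 * .map_mr_sg op on top of ib_sg_to_pages(). struct example_mr, its page_list
 * array and to_example_mr() are hypothetical driver-private names.
 */
struct example_mr {
	struct ib_mr ibmr;
	u64 *page_list;		/* max_pages entries, allocated with the MR */
	unsigned int max_pages;
	unsigned int npages;
};

#define to_example_mr(m) container_of(m, struct example_mr, ibmr)

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *emr = to_example_mr(ibmr);

	if (unlikely(emr->npages == emr->max_pages))
		return -ENOMEM;

	/* Record one page-aligned DMA address of the mapped prefix. */
	emr->page_list[emr->npages++] = addr;
	return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	struct example_mr *emr = to_example_mr(ibmr);

	emr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}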
2636 struct ib_drain_cqe {
2638 struct completion done;
2641 static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2643 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2646 complete(&cqe->done);
2650 * Post a WR and block until its completion is reaped for the SQ.
2652 static void __ib_drain_sq(struct ib_qp *qp)
2654 struct ib_cq *cq = qp->send_cq;
2655 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2656 struct ib_drain_cqe sdrain;
2657 struct ib_rdma_wr swr = {
2660 { .wr_cqe = &sdrain.cqe, },
2661 .opcode = IB_WR_RDMA_WRITE,
2666 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2668 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2672 sdrain.cqe.done = ib_drain_qp_done;
2673 init_completion(&sdrain.done);
2675 ret = ib_post_send(qp, &swr.wr, NULL);
2677 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2681 if (cq->poll_ctx == IB_POLL_DIRECT)
2682 while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2683 ib_process_cq_direct(cq, -1);
2685 wait_for_completion(&sdrain.done);
2689 * Post a WR and block until its completion is reaped for the RQ.
2691 static void __ib_drain_rq(struct ib_qp *qp)
2693 struct ib_cq *cq = qp->recv_cq;
2694 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2695 struct ib_drain_cqe rdrain;
2696 struct ib_recv_wr rwr = {};
2699 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2701 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2705 rwr.wr_cqe = &rdrain.cqe;
2706 rdrain.cqe.done = ib_drain_qp_done;
2707 init_completion(&rdrain.done);
2709 ret = ib_post_recv(qp, &rwr, NULL);
2711 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2715 if (cq->poll_ctx == IB_POLL_DIRECT)
2716 while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2717 ib_process_cq_direct(cq, -1);
2719 wait_for_completion(&rdrain.done);
2723 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2724 * application.
2725 * @qp: queue pair to drain
2727 * If the device has a provider-specific drain function, then
2728 * call that. Otherwise call the generic drain function
2729 * __ib_drain_sq().
2733 * ensure there is room in the CQ and SQ for the drain work request and
2734 * completion.
2736 * allocate the CQ using ib_alloc_cq().
2738 * ensure that there are no other contexts that are posting WRs concurrently.
2739 * Otherwise the drain is not guaranteed.
2741 void ib_drain_sq(struct ib_qp *qp)
2743 if (qp->device->ops.drain_sq)
2744 qp->device->ops.drain_sq(qp);
2748 EXPORT_SYMBOL(ib_drain_sq);
2751 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2752 * application.
2753 * @qp: queue pair to drain
2755 * If the device has a provider-specific drain function, then
2756 * call that. Otherwise call the generic drain function
2757 * __ib_drain_rq().
2761 * ensure there is room in the CQ and RQ for the drain work request and
2762 * completion.
2764 * allocate the CQ using ib_alloc_cq().
2766 * ensure that there are no other contexts that are posting WRs concurrently.
2767 * Otherwise the drain is not guaranteed.
2769 void ib_drain_rq(struct ib_qp *qp)
2771 if (qp->device->ops.drain_rq)
2772 qp->device->ops.drain_rq(qp);
2776 EXPORT_SYMBOL(ib_drain_rq);
2779 * ib_drain_qp() - Block until all CQEs have been consumed by the
2780 * application on both the RQ and SQ.
2781 * @qp: queue pair to drain
2785 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2786 * and completions.
2788 * allocate the CQs using ib_alloc_cq().
2790 * ensure that there are no other contexts that are posting WRs concurrently.
2791 * Otherwise the drain is not guaranteed.
2793 void ib_drain_qp(struct ib_qp *qp)
2799 EXPORT_SYMBOL(ib_drain_qp);
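/*
 * Illustrative sketch only: typical teardown ordering in a ULP. Draining
 * guarantees every posted WR has generated (and the ULP has reaped) a
 * completion before the QP is destroyed. example_shutdown_qp is hypothetical
 * and assumes no other context is still posting WRs, per the comments above.
 */
static void example_shutdown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* flushes both the SQ and the RQ */
	ib_destroy_qp(qp);
}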
2801 struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
2802 enum rdma_netdev_t type, const char *name,
2803 unsigned char name_assign_type,
2804 void (*setup)(struct net_device *))
2806 struct rdma_netdev_alloc_params params;
2807 struct net_device *netdev;
2810 if (!device->ops.rdma_netdev_get_params)
2811 return ERR_PTR(-EOPNOTSUPP);
2813 rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2818 netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
2819 setup, params.txqs, params.rxqs);
2821 return ERR_PTR(-ENOMEM);
2825 EXPORT_SYMBOL(rdma_alloc_netdev);
2827 int rdma_init_netdev(struct ib_device *device, u8 port_num,
2828 enum rdma_netdev_t type, const char *name,
2829 unsigned char name_assign_type,
2830 void (*setup)(struct net_device *),
2831 struct net_device *netdev)
2833 struct rdma_netdev_alloc_params params;
2836 if (!device->ops.rdma_netdev_get_params)
2839 rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2844 return params.initialize_rdma_netdev(device, port_num,
2845 netdev, params.param);
2847 EXPORT_SYMBOL(rdma_init_netdev);
2849 void __rdma_block_iter_start(struct ib_block_iter *biter,
2850 struct scatterlist *sglist, unsigned int nents,
2853 memset(biter, 0, sizeof(struct ib_block_iter));
2854 biter->__sg = sglist;
2855 biter->__sg_nents = nents;
2857 /* Driver provides best block size to use */
2858 biter->__pg_bit = __fls(pgsz);
2860 EXPORT_SYMBOL(__rdma_block_iter_start);
2862 bool __rdma_block_iter_next(struct ib_block_iter *biter)
2864 unsigned int block_offset;
2866 if (!biter->__sg_nents || !biter->__sg)
2869 biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
2870 block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
2871 biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
2873 if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
2874 biter->__sg_advance = 0;
2875 biter->__sg = sg_next(biter->__sg);
2876 biter->__sg_nents--;
2881 EXPORT_SYMBOL(__rdma_block_iter_next);
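/*
 * Illustrative sketch only: consuming the block iterator through the
 * rdma_for_each_block() helper to fill a page array with block-aligned DMA
 * addresses. example_fill_page_list and its pas/pgsz parameters are
 * hypothetical; pgsz must be a power-of-two block size the device supports.
 */
static unsigned int example_fill_page_list(struct scatterlist *sglist,
					   unsigned int nents, u64 *pas,
					   unsigned long pgsz)
{
	struct ib_block_iter biter;
	unsigned int n = 0;

	rdma_for_each_block(sglist, &biter, nents, pgsz)
		pas[n++] = rdma_block_iter_dma_address(&biter);

	return n;	/* number of pgsz-sized blocks covering the sglist */
}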