drivers/infiniband/core/verbs.c
1 /*
2  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
3  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
4  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
5  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
6  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
7  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8  * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
9  *
10  * This software is available to you under a choice of one of two
11  * licenses.  You may choose to be licensed under the terms of the GNU
12  * General Public License (GPL) Version 2, available from the file
13  * COPYING in the main directory of this source tree, or the
14  * OpenIB.org BSD license below:
15  *
16  *     Redistribution and use in source and binary forms, with or
17  *     without modification, are permitted provided that the following
18  *     conditions are met:
19  *
20  *      - Redistributions of source code must retain the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer.
23  *
24  *      - Redistributions in binary form must reproduce the above
25  *        copyright notice, this list of conditions and the following
26  *        disclaimer in the documentation and/or other materials
27  *        provided with the distribution.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36  * SOFTWARE.
37  */
38
39 #include <linux/errno.h>
40 #include <linux/err.h>
41 #include <linux/export.h>
42 #include <linux/string.h>
43 #include <linux/slab.h>
44 #include <linux/in.h>
45 #include <linux/in6.h>
46 #include <net/addrconf.h>
47 #include <linux/security.h>
48
49 #include <rdma/ib_verbs.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/ib_addr.h>
52 #include <rdma/rw.h>
53 #include <rdma/lag.h>
54
55 #include "core_priv.h"
56 #include <trace/events/rdma_core.h>
57
58 static int ib_resolve_eth_dmac(struct ib_device *device,
59                                struct rdma_ah_attr *ah_attr);
60
61 static const char * const ib_events[] = {
62         [IB_EVENT_CQ_ERR]               = "CQ error",
63         [IB_EVENT_QP_FATAL]             = "QP fatal error",
64         [IB_EVENT_QP_REQ_ERR]           = "QP request error",
65         [IB_EVENT_QP_ACCESS_ERR]        = "QP access error",
66         [IB_EVENT_COMM_EST]             = "communication established",
67         [IB_EVENT_SQ_DRAINED]           = "send queue drained",
68         [IB_EVENT_PATH_MIG]             = "path migration successful",
69         [IB_EVENT_PATH_MIG_ERR]         = "path migration error",
70         [IB_EVENT_DEVICE_FATAL]         = "device fatal error",
71         [IB_EVENT_PORT_ACTIVE]          = "port active",
72         [IB_EVENT_PORT_ERR]             = "port error",
73         [IB_EVENT_LID_CHANGE]           = "LID change",
74         [IB_EVENT_PKEY_CHANGE]          = "P_key change",
75         [IB_EVENT_SM_CHANGE]            = "SM change",
76         [IB_EVENT_SRQ_ERR]              = "SRQ error",
77         [IB_EVENT_SRQ_LIMIT_REACHED]    = "SRQ limit reached",
78         [IB_EVENT_QP_LAST_WQE_REACHED]  = "last WQE reached",
79         [IB_EVENT_CLIENT_REREGISTER]    = "client reregister",
80         [IB_EVENT_GID_CHANGE]           = "GID changed",
81 };
82
83 const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
84 {
85         size_t index = event;
86
87         return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
88                         ib_events[index] : "unrecognized event";
89 }
90 EXPORT_SYMBOL(ib_event_msg);
91
92 static const char * const wc_statuses[] = {
93         [IB_WC_SUCCESS]                 = "success",
94         [IB_WC_LOC_LEN_ERR]             = "local length error",
95         [IB_WC_LOC_QP_OP_ERR]           = "local QP operation error",
96         [IB_WC_LOC_EEC_OP_ERR]          = "local EE context operation error",
97         [IB_WC_LOC_PROT_ERR]            = "local protection error",
98         [IB_WC_WR_FLUSH_ERR]            = "WR flushed",
99         [IB_WC_MW_BIND_ERR]             = "memory bind operation error",
100         [IB_WC_BAD_RESP_ERR]            = "bad response error",
101         [IB_WC_LOC_ACCESS_ERR]          = "local access error",
102         [IB_WC_REM_INV_REQ_ERR]         = "remote invalid request error",
103         [IB_WC_REM_ACCESS_ERR]          = "remote access error",
104         [IB_WC_REM_OP_ERR]              = "remote operation error",
105         [IB_WC_RETRY_EXC_ERR]           = "transport retry counter exceeded",
106         [IB_WC_RNR_RETRY_EXC_ERR]       = "RNR retry counter exceeded",
107         [IB_WC_LOC_RDD_VIOL_ERR]        = "local RDD violation error",
108         [IB_WC_REM_INV_RD_REQ_ERR]      = "remote invalid RD request",
109         [IB_WC_REM_ABORT_ERR]           = "operation aborted",
110         [IB_WC_INV_EECN_ERR]            = "invalid EE context number",
111         [IB_WC_INV_EEC_STATE_ERR]       = "invalid EE context state",
112         [IB_WC_FATAL_ERR]               = "fatal error",
113         [IB_WC_RESP_TIMEOUT_ERR]        = "response timeout error",
114         [IB_WC_GENERAL_ERR]             = "general error",
115 };
116
117 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
118 {
119         size_t index = status;
120
121         return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
122                         wc_statuses[index] : "unrecognized status";
123 }
124 EXPORT_SYMBOL(ib_wc_status_msg);
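
/*
 * Example: a minimal logging sketch using the two string helpers above; the
 * "my_qp_event_handler" name and the surrounding context are illustrative
 * only. ULPs typically use these strings when reporting failed completions
 * and asynchronous events.
 *
 *	static void my_qp_event_handler(struct ib_event *event, void *context)
 *	{
 *		pr_warn("QP event %s (%d)\n",
 *			ib_event_msg(event->event), event->event);
 *	}
 *
 *	// In a completion handler, for a failed work completion "wc":
 *	if (wc->status != IB_WC_SUCCESS)
 *		pr_err("WC failed: %s (%d)\n",
 *		       ib_wc_status_msg(wc->status), wc->status);
 */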
125
126 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
127 {
128         switch (rate) {
129         case IB_RATE_2_5_GBPS: return   1;
130         case IB_RATE_5_GBPS:   return   2;
131         case IB_RATE_10_GBPS:  return   4;
132         case IB_RATE_20_GBPS:  return   8;
133         case IB_RATE_30_GBPS:  return  12;
134         case IB_RATE_40_GBPS:  return  16;
135         case IB_RATE_60_GBPS:  return  24;
136         case IB_RATE_80_GBPS:  return  32;
137         case IB_RATE_120_GBPS: return  48;
138         case IB_RATE_14_GBPS:  return   6;
139         case IB_RATE_56_GBPS:  return  22;
140         case IB_RATE_112_GBPS: return  45;
141         case IB_RATE_168_GBPS: return  67;
142         case IB_RATE_25_GBPS:  return  10;
143         case IB_RATE_100_GBPS: return  40;
144         case IB_RATE_200_GBPS: return  80;
145         case IB_RATE_300_GBPS: return 120;
146         case IB_RATE_28_GBPS:  return  11;
147         case IB_RATE_50_GBPS:  return  20;
148         case IB_RATE_400_GBPS: return 160;
149         case IB_RATE_600_GBPS: return 240;
150         default:               return  -1;
151         }
152 }
153 EXPORT_SYMBOL(ib_rate_to_mult);
154
155 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
156 {
157         switch (mult) {
158         case 1:   return IB_RATE_2_5_GBPS;
159         case 2:   return IB_RATE_5_GBPS;
160         case 4:   return IB_RATE_10_GBPS;
161         case 8:   return IB_RATE_20_GBPS;
162         case 12:  return IB_RATE_30_GBPS;
163         case 16:  return IB_RATE_40_GBPS;
164         case 24:  return IB_RATE_60_GBPS;
165         case 32:  return IB_RATE_80_GBPS;
166         case 48:  return IB_RATE_120_GBPS;
167         case 6:   return IB_RATE_14_GBPS;
168         case 22:  return IB_RATE_56_GBPS;
169         case 45:  return IB_RATE_112_GBPS;
170         case 67:  return IB_RATE_168_GBPS;
171         case 10:  return IB_RATE_25_GBPS;
172         case 40:  return IB_RATE_100_GBPS;
173         case 80:  return IB_RATE_200_GBPS;
174         case 120: return IB_RATE_300_GBPS;
175         case 11:  return IB_RATE_28_GBPS;
176         case 20:  return IB_RATE_50_GBPS;
177         case 160: return IB_RATE_400_GBPS;
178         case 240: return IB_RATE_600_GBPS;
179         default:  return IB_RATE_PORT_CURRENT;
180         }
181 }
182 EXPORT_SYMBOL(mult_to_ib_rate);
183
184 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
185 {
186         switch (rate) {
187         case IB_RATE_2_5_GBPS: return 2500;
188         case IB_RATE_5_GBPS:   return 5000;
189         case IB_RATE_10_GBPS:  return 10000;
190         case IB_RATE_20_GBPS:  return 20000;
191         case IB_RATE_30_GBPS:  return 30000;
192         case IB_RATE_40_GBPS:  return 40000;
193         case IB_RATE_60_GBPS:  return 60000;
194         case IB_RATE_80_GBPS:  return 80000;
195         case IB_RATE_120_GBPS: return 120000;
196         case IB_RATE_14_GBPS:  return 14062;
197         case IB_RATE_56_GBPS:  return 56250;
198         case IB_RATE_112_GBPS: return 112500;
199         case IB_RATE_168_GBPS: return 168750;
200         case IB_RATE_25_GBPS:  return 25781;
201         case IB_RATE_100_GBPS: return 103125;
202         case IB_RATE_200_GBPS: return 206250;
203         case IB_RATE_300_GBPS: return 309375;
204         case IB_RATE_28_GBPS:  return 28125;
205         case IB_RATE_50_GBPS:  return 53125;
206         case IB_RATE_400_GBPS: return 425000;
207         case IB_RATE_600_GBPS: return 637500;
208         default:               return -1;
209         }
210 }
211 EXPORT_SYMBOL(ib_rate_to_mbps);
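
/*
 * Example: a minimal sketch of the rate helpers above. The multiplier is
 * relative to the 2.5 Gbps SDR base rate, and ib_rate_to_mbps() returns the
 * signalling rate in Mbit/sec, so for instance:
 *
 *	ib_rate_to_mult(IB_RATE_100_GBPS)     == 40;      // 40 * 2.5 Gbps
 *	mult_to_ib_rate(40)                   == IB_RATE_100_GBPS;
 *	ib_rate_to_mbps(IB_RATE_100_GBPS)     == 103125;  // ~100 Gbps on the wire
 *	ib_rate_to_mult(IB_RATE_PORT_CURRENT) == -1;      // not a fixed rate
 */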
212
213 __attribute_const__ enum rdma_transport_type
214 rdma_node_get_transport(unsigned int node_type)
215 {
216
217         if (node_type == RDMA_NODE_USNIC)
218                 return RDMA_TRANSPORT_USNIC;
219         if (node_type == RDMA_NODE_USNIC_UDP)
220                 return RDMA_TRANSPORT_USNIC_UDP;
221         if (node_type == RDMA_NODE_RNIC)
222                 return RDMA_TRANSPORT_IWARP;
223         if (node_type == RDMA_NODE_UNSPECIFIED)
224                 return RDMA_TRANSPORT_UNSPECIFIED;
225
226         return RDMA_TRANSPORT_IB;
227 }
228 EXPORT_SYMBOL(rdma_node_get_transport);
229
230 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
231                                               u32 port_num)
232 {
233         enum rdma_transport_type lt;
234         if (device->ops.get_link_layer)
235                 return device->ops.get_link_layer(device, port_num);
236
237         lt = rdma_node_get_transport(device->node_type);
238         if (lt == RDMA_TRANSPORT_IB)
239                 return IB_LINK_LAYER_INFINIBAND;
240
241         return IB_LINK_LAYER_ETHERNET;
242 }
243 EXPORT_SYMBOL(rdma_port_get_link_layer);
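
/*
 * Example: a minimal sketch of branching on a port's link layer; the
 * "setup_port" name is illustrative only.
 *
 *	static void setup_port(struct ib_device *device, u32 port_num)
 *	{
 *		if (rdma_port_get_link_layer(device, port_num) ==
 *		    IB_LINK_LAYER_ETHERNET)
 *			;	// RoCE/iWARP/usNIC style addressing (GIDs from IP)
 *		else
 *			;	// native InfiniBand addressing (LIDs, SM managed)
 *	}
 */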
244
245 /* Protection domains */
246
247 /**
248  * __ib_alloc_pd - Allocates an unused protection domain.
249  * @device: The device on which to allocate the protection domain.
250  * @flags: protection domain flags
251  * @caller: caller's build-time module name
252  *
253  * A protection domain object provides an association between QPs, shared
254  * receive queues, address handles, memory regions, and memory windows.
255  *
256  * Every PD has a local_dma_lkey which can be used as the lkey value for local
257  * memory operations.
258  */
259 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
260                 const char *caller)
261 {
262         struct ib_pd *pd;
263         int mr_access_flags = 0;
264         int ret;
265
266         pd = rdma_zalloc_drv_obj(device, ib_pd);
267         if (!pd)
268                 return ERR_PTR(-ENOMEM);
269
270         pd->device = device;
271         pd->uobject = NULL;
272         pd->__internal_mr = NULL;
273         atomic_set(&pd->usecnt, 0);
274         pd->flags = flags;
275
276         rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD);
277         rdma_restrack_set_name(&pd->res, caller);
278
279         ret = device->ops.alloc_pd(pd, NULL);
280         if (ret) {
281                 rdma_restrack_put(&pd->res);
282                 kfree(pd);
283                 return ERR_PTR(ret);
284         }
285         rdma_restrack_add(&pd->res);
286
287         if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
288                 pd->local_dma_lkey = device->local_dma_lkey;
289         else
290                 mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
291
292         if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
293                 pr_warn("%s: enabling unsafe global rkey\n", caller);
294                 mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
295         }
296
297         if (mr_access_flags) {
298                 struct ib_mr *mr;
299
300                 mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
301                 if (IS_ERR(mr)) {
302                         ib_dealloc_pd(pd);
303                         return ERR_CAST(mr);
304                 }
305
306                 mr->device      = pd->device;
307                 mr->pd          = pd;
308                 mr->type        = IB_MR_TYPE_DMA;
309                 mr->uobject     = NULL;
310                 mr->need_inval  = false;
311
312                 pd->__internal_mr = mr;
313
314                 if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
315                         pd->local_dma_lkey = pd->__internal_mr->lkey;
316
317                 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
318                         pd->unsafe_global_rkey = pd->__internal_mr->rkey;
319         }
320
321         return pd;
322 }
323 EXPORT_SYMBOL(__ib_alloc_pd);
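
/*
 * Example: a minimal sketch of the usual kernel-side PD life cycle. The
 * ib_alloc_pd() wrapper supplies the caller's module name; the "my_setup",
 * "buf_dma" and "len" names are illustrative only.
 *
 *	static int my_setup(struct ib_device *device, u64 buf_dma, u32 len)
 *	{
 *		struct ib_sge sge;
 *		struct ib_pd *pd;
 *
 *		pd = ib_alloc_pd(device, 0);
 *		if (IS_ERR(pd))
 *			return PTR_ERR(pd);
 *
 *		// Every PD provides an lkey usable for local DMA mappings.
 *		sge.addr   = buf_dma;
 *		sge.length = len;
 *		sge.lkey   = pd->local_dma_lkey;
 *		...
 *		ib_dealloc_pd(pd);	// only once all dependent objects are gone
 *		return 0;
 *	}
 */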
324
325 /**
326  * ib_dealloc_pd_user - Deallocates a protection domain.
327  * @pd: The protection domain to deallocate.
328  * @udata: Valid user data or NULL for kernel object
329  *
330  * It is an error to call this function while any resources in the pd still
331  * exist.  The caller is responsible to synchronously destroy them and
332  * guarantee no new allocations will happen.
333  */
334 int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
335 {
336         int ret;
337
338         if (pd->__internal_mr) {
339                 ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
340                 WARN_ON(ret);
341                 pd->__internal_mr = NULL;
342         }
343
344         /* uverbs manipulates usecnt with proper locking, while the kabi
345          * requires the caller to guarantee we can't race here.
346          */
347         WARN_ON(atomic_read(&pd->usecnt));
348
349         ret = pd->device->ops.dealloc_pd(pd, udata);
350         if (ret)
351                 return ret;
352
353         rdma_restrack_del(&pd->res);
354         kfree(pd);
355         return ret;
356 }
357 EXPORT_SYMBOL(ib_dealloc_pd_user);
358
359 /* Address handles */
360
361 /**
362  * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
363  * @dest:       Pointer to destination ah_attr. Contents of the destination
364  *              pointer are assumed to be invalid and the attributes are overwritten.
365  * @src:        Pointer to source ah_attr.
366  */
367 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
368                        const struct rdma_ah_attr *src)
369 {
370         *dest = *src;
371         if (dest->grh.sgid_attr)
372                 rdma_hold_gid_attr(dest->grh.sgid_attr);
373 }
374 EXPORT_SYMBOL(rdma_copy_ah_attr);
375
376 /**
377  * rdma_replace_ah_attr - Replace valid ah_attr with a new one.
378  * @old:        Pointer to existing ah_attr which needs to be replaced.
379  *              old is assumed to be valid or zero'd
380  * @new:        Pointer to the new ah_attr.
381  *
382  * rdma_replace_ah_attr() first releases any reference held by the old ah_attr
383  * if the old ah_attr is valid; after that it copies the new attribute and
384  * takes a reference on its sgid_attr.
385  */
386 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
387                           const struct rdma_ah_attr *new)
388 {
389         rdma_destroy_ah_attr(old);
390         *old = *new;
391         if (old->grh.sgid_attr)
392                 rdma_hold_gid_attr(old->grh.sgid_attr);
393 }
394 EXPORT_SYMBOL(rdma_replace_ah_attr);
395
396 /**
397  * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
398  * @dest:       Pointer to destination ah_attr to copy to.
399  *              dest is assumed to be valid or zero'd
400  * @src:        Pointer to the source ah_attr.
401  *
402  * rdma_move_ah_attr() first releases any reference in the destination ah_attr
403  * if it is valid. This also transfers ownership of internal references from
404  * src to dest, making src invalid in the process. No new reference of the src
405  * ah_attr is taken.
406  */
407 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
408 {
409         rdma_destroy_ah_attr(dest);
410         *dest = *src;
411         src->grh.sgid_attr = NULL;
412 }
413 EXPORT_SYMBOL(rdma_move_ah_attr);
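
/*
 * Example: a minimal sketch of the reference-counting contract of the three
 * helpers above; the "cached" variable is illustrative only.
 *
 *	struct rdma_ah_attr cached = {};
 *
 *	rdma_copy_ah_attr(&cached, src);     // cached holds its own sgid_attr ref
 *	rdma_replace_ah_attr(&cached, new);  // drops the old ref, takes a new one
 *	rdma_move_ah_attr(&cached, tmp);     // steals tmp's ref, tmp becomes invalid
 *	rdma_destroy_ah_attr(&cached);       // finally release the held ref
 */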
414
415 /*
416  * Validate that the rdma_ah_attr is valid for the device before passing it
417  * off to the driver.
418  */
419 static int rdma_check_ah_attr(struct ib_device *device,
420                               struct rdma_ah_attr *ah_attr)
421 {
422         if (!rdma_is_port_valid(device, ah_attr->port_num))
423                 return -EINVAL;
424
425         if ((rdma_is_grh_required(device, ah_attr->port_num) ||
426              ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
427             !(ah_attr->ah_flags & IB_AH_GRH))
428                 return -EINVAL;
429
430         if (ah_attr->grh.sgid_attr) {
431                 /*
432                  * Make sure the passed sgid_attr is consistent with the
433                  * parameters
434                  */
435                 if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
436                     ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
437                         return -EINVAL;
438         }
439         return 0;
440 }
441
442 /*
443  * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
444  * On success the caller is responsible to call rdma_unfill_sgid_attr().
445  */
446 static int rdma_fill_sgid_attr(struct ib_device *device,
447                                struct rdma_ah_attr *ah_attr,
448                                const struct ib_gid_attr **old_sgid_attr)
449 {
450         const struct ib_gid_attr *sgid_attr;
451         struct ib_global_route *grh;
452         int ret;
453
454         *old_sgid_attr = ah_attr->grh.sgid_attr;
455
456         ret = rdma_check_ah_attr(device, ah_attr);
457         if (ret)
458                 return ret;
459
460         if (!(ah_attr->ah_flags & IB_AH_GRH))
461                 return 0;
462
463         grh = rdma_ah_retrieve_grh(ah_attr);
464         if (grh->sgid_attr)
465                 return 0;
466
467         sgid_attr =
468                 rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
469         if (IS_ERR(sgid_attr))
470                 return PTR_ERR(sgid_attr);
471
472         /* Move ownership of the kref into the ah_attr */
473         grh->sgid_attr = sgid_attr;
474         return 0;
475 }
476
477 static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
478                                   const struct ib_gid_attr *old_sgid_attr)
479 {
480         /*
481          * Fill didn't change anything, the caller retains ownership of
482          * whatever it passed
483          */
484         if (ah_attr->grh.sgid_attr == old_sgid_attr)
485                 return;
486
487         /*
488          * Otherwise, we need to undo what rdma_fill_sgid_attr() did so the caller
489          * doesn't see any change in the rdma_ah_attr. If we get here,
490          * old_sgid_attr is NULL.
491          */
492         rdma_destroy_ah_attr(ah_attr);
493 }
494
495 static const struct ib_gid_attr *
496 rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
497                       const struct ib_gid_attr *old_attr)
498 {
499         if (old_attr)
500                 rdma_put_gid_attr(old_attr);
501         if (ah_attr->ah_flags & IB_AH_GRH) {
502                 rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
503                 return ah_attr->grh.sgid_attr;
504         }
505         return NULL;
506 }
507
508 static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
509                                      struct rdma_ah_attr *ah_attr,
510                                      u32 flags,
511                                      struct ib_udata *udata,
512                                      struct net_device *xmit_slave)
513 {
514         struct rdma_ah_init_attr init_attr = {};
515         struct ib_device *device = pd->device;
516         struct ib_ah *ah;
517         int ret;
518
519         might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
520
521         if (!udata && !device->ops.create_ah)
522                 return ERR_PTR(-EOPNOTSUPP);
523
524         ah = rdma_zalloc_drv_obj_gfp(
525                 device, ib_ah,
526                 (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
527         if (!ah)
528                 return ERR_PTR(-ENOMEM);
529
530         ah->device = device;
531         ah->pd = pd;
532         ah->type = ah_attr->type;
533         ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
534         init_attr.ah_attr = ah_attr;
535         init_attr.flags = flags;
536         init_attr.xmit_slave = xmit_slave;
537
538         if (udata)
539                 ret = device->ops.create_user_ah(ah, &init_attr, udata);
540         else
541                 ret = device->ops.create_ah(ah, &init_attr, NULL);
542         if (ret) {
543                 kfree(ah);
544                 return ERR_PTR(ret);
545         }
546
547         atomic_inc(&pd->usecnt);
548         return ah;
549 }
550
551 /**
552  * rdma_create_ah - Creates an address handle for the
553  * given address vector.
554  * @pd: The protection domain associated with the address handle.
555  * @ah_attr: The attributes of the address vector.
556  * @flags: Create address handle flags (see enum rdma_create_ah_flags).
557  *
558  * On success it returns a newly allocated address handle; on error an ERR_PTR.
559  * The address handle is used to reference a local or global destination
560  * in all UD QP post sends.
561  */
562 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
563                              u32 flags)
564 {
565         const struct ib_gid_attr *old_sgid_attr;
566         struct net_device *slave;
567         struct ib_ah *ah;
568         int ret;
569
570         ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
571         if (ret)
572                 return ERR_PTR(ret);
573         slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
574                                            (flags & RDMA_CREATE_AH_SLEEPABLE) ?
575                                            GFP_KERNEL : GFP_ATOMIC);
576         if (IS_ERR(slave)) {
577                 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
578                 return (void *)slave;
579         }
580         ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
581         rdma_lag_put_ah_roce_slave(slave);
582         rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
583         return ah;
584 }
585 EXPORT_SYMBOL(rdma_create_ah);
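
/*
 * Example: a minimal sketch of creating and destroying an AH from a known
 * destination LID on an InfiniBand port. The "make_ah" name and the dlid/sl
 * values are illustrative only; RoCE callers would fill the GRH fields
 * instead (see rdma_ah_set_grh()).
 *
 *	static struct ib_ah *make_ah(struct ib_pd *pd, u32 port_num, u32 dlid)
 *	{
 *		struct rdma_ah_attr attr = {};
 *
 *		attr.type = rdma_ah_find_type(pd->device, port_num);
 *		rdma_ah_set_port_num(&attr, port_num);
 *		rdma_ah_set_sl(&attr, 0);
 *		rdma_ah_set_dlid(&attr, dlid);
 *
 *		return rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);
 *	}
 *
 *	// and later, from a sleepable context:
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */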
586
587 /**
588  * rdma_create_user_ah - Creates an address handle for the
589  * given address vector.
590  * It resolves the destination MAC address for an ah_attr of RoCE type.
591  * @pd: The protection domain associated with the address handle.
592  * @ah_attr: The attributes of the address vector.
593  * @udata: pointer to the user's input/output buffer information needed by
594  *         the provider driver.
595  *
596  * On success it returns a newly allocated address handle; on error an ERR_PTR.
597  * The address handle is used to reference a local or global destination
598  * in all UD QP post sends.
599  */
600 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
601                                   struct rdma_ah_attr *ah_attr,
602                                   struct ib_udata *udata)
603 {
604         const struct ib_gid_attr *old_sgid_attr;
605         struct ib_ah *ah;
606         int err;
607
608         err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
609         if (err)
610                 return ERR_PTR(err);
611
612         if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
613                 err = ib_resolve_eth_dmac(pd->device, ah_attr);
614                 if (err) {
615                         ah = ERR_PTR(err);
616                         goto out;
617                 }
618         }
619
620         ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
621                              udata, NULL);
622
623 out:
624         rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
625         return ah;
626 }
627 EXPORT_SYMBOL(rdma_create_user_ah);
628
629 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
630 {
631         const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
632         struct iphdr ip4h_checked;
633         const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
634
635         /* If it's IPv6, the version must be 6, otherwise, the first
636          * 20 bytes (before the IPv4 header) are garbled.
637          */
638         if (ip6h->version != 6)
639                 return (ip4h->version == 4) ? 4 : 0;
640         /* version may be 6 or 4 because the first 20 bytes could be garbled */
641
642         /* RoCE v2 requires no options, thus header length
643          * must be 5 words
644          */
645         if (ip4h->ihl != 5)
646                 return 6;
647
648         /* Verify checksum.
649          * We can't write on scattered buffers so we need to copy to
650          * temp buffer.
651          */
652         memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
653         ip4h_checked.check = 0;
654         ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
655         /* if IPv4 header checksum is OK, believe it */
656         if (ip4h->check == ip4h_checked.check)
657                 return 4;
658         return 6;
659 }
660 EXPORT_SYMBOL(ib_get_rdma_header_version);
661
662 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
663                                                      u32 port_num,
664                                                      const struct ib_grh *grh)
665 {
666         int grh_version;
667
668         if (rdma_protocol_ib(device, port_num))
669                 return RDMA_NETWORK_IB;
670
671         grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
672
673         if (grh_version == 4)
674                 return RDMA_NETWORK_IPV4;
675
676         if (grh->next_hdr == IPPROTO_UDP)
677                 return RDMA_NETWORK_IPV6;
678
679         return RDMA_NETWORK_ROCE_V1;
680 }
681
682 struct find_gid_index_context {
683         u16 vlan_id;
684         enum ib_gid_type gid_type;
685 };
686
687 static bool find_gid_index(const union ib_gid *gid,
688                            const struct ib_gid_attr *gid_attr,
689                            void *context)
690 {
691         struct find_gid_index_context *ctx = context;
692         u16 vlan_id = 0xffff;
693         int ret;
694
695         if (ctx->gid_type != gid_attr->gid_type)
696                 return false;
697
698         ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
699         if (ret)
700                 return false;
701
702         return ctx->vlan_id == vlan_id;
703 }
704
705 static const struct ib_gid_attr *
706 get_sgid_attr_from_eth(struct ib_device *device, u32 port_num,
707                        u16 vlan_id, const union ib_gid *sgid,
708                        enum ib_gid_type gid_type)
709 {
710         struct find_gid_index_context context = {.vlan_id = vlan_id,
711                                                  .gid_type = gid_type};
712
713         return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
714                                        &context);
715 }
716
717 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
718                               enum rdma_network_type net_type,
719                               union ib_gid *sgid, union ib_gid *dgid)
720 {
721         struct sockaddr_in  src_in;
722         struct sockaddr_in  dst_in;
723         __be32 src_saddr, dst_saddr;
724
725         if (!sgid || !dgid)
726                 return -EINVAL;
727
728         if (net_type == RDMA_NETWORK_IPV4) {
729                 memcpy(&src_in.sin_addr.s_addr,
730                        &hdr->roce4grh.saddr, 4);
731                 memcpy(&dst_in.sin_addr.s_addr,
732                        &hdr->roce4grh.daddr, 4);
733                 src_saddr = src_in.sin_addr.s_addr;
734                 dst_saddr = dst_in.sin_addr.s_addr;
735                 ipv6_addr_set_v4mapped(src_saddr,
736                                        (struct in6_addr *)sgid);
737                 ipv6_addr_set_v4mapped(dst_saddr,
738                                        (struct in6_addr *)dgid);
739                 return 0;
740         } else if (net_type == RDMA_NETWORK_IPV6 ||
741                    net_type == RDMA_NETWORK_IB || net_type == RDMA_NETWORK_ROCE_V1) {
742                 *dgid = hdr->ibgrh.dgid;
743                 *sgid = hdr->ibgrh.sgid;
744                 return 0;
745         } else {
746                 return -EINVAL;
747         }
748 }
749 EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
750
751 /* Resolve destination mac address and hop limit for unicast destination
752  * GID entry, considering the source GID entry as well.
753  * The ah attribute must have valid port_num and sgid_index.
754  */
755 static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
756                                        struct rdma_ah_attr *ah_attr)
757 {
758         struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
759         const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
760         int hop_limit = 0xff;
761         int ret = 0;
762
763         /* If destination is link local and source GID is RoCEv1,
764          * IP stack is not used.
765          */
766         if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
767             sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
768                 rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
769                                 ah_attr->roce.dmac);
770                 return ret;
771         }
772
773         ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
774                                            ah_attr->roce.dmac,
775                                            sgid_attr, &hop_limit);
776
777         grh->hop_limit = hop_limit;
778         return ret;
779 }
780
781 /*
782  * This function initializes address handle attributes from the incoming packet.
783  * The incoming packet carries the dgid of the receiver node on which this
784  * code is executing, and its sgid contains the GID of the sender.
785  *
786  * When resolving the destination MAC address, the arrived dgid is used as
787  * the sgid and the sgid is used as the dgid, because the sgid holds the
788  * GID of the destination to respond to.
789  *
790  * On success the caller is responsible to call rdma_destroy_ah_attr on the
791  * attr.
792  */
793 int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
794                             const struct ib_wc *wc, const struct ib_grh *grh,
795                             struct rdma_ah_attr *ah_attr)
796 {
797         u32 flow_class;
798         int ret;
799         enum rdma_network_type net_type = RDMA_NETWORK_IB;
800         enum ib_gid_type gid_type = IB_GID_TYPE_IB;
801         const struct ib_gid_attr *sgid_attr;
802         int hoplimit = 0xff;
803         union ib_gid dgid;
804         union ib_gid sgid;
805
806         might_sleep();
807
808         memset(ah_attr, 0, sizeof *ah_attr);
809         ah_attr->type = rdma_ah_find_type(device, port_num);
810         if (rdma_cap_eth_ah(device, port_num)) {
811                 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
812                         net_type = wc->network_hdr_type;
813                 else
814                         net_type = ib_get_net_type_by_grh(device, port_num, grh);
815                 gid_type = ib_network_to_gid_type(net_type);
816         }
817         ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
818                                         &sgid, &dgid);
819         if (ret)
820                 return ret;
821
822         rdma_ah_set_sl(ah_attr, wc->sl);
823         rdma_ah_set_port_num(ah_attr, port_num);
824
825         if (rdma_protocol_roce(device, port_num)) {
826                 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
827                                 wc->vlan_id : 0xffff;
828
829                 if (!(wc->wc_flags & IB_WC_GRH))
830                         return -EPROTOTYPE;
831
832                 sgid_attr = get_sgid_attr_from_eth(device, port_num,
833                                                    vlan_id, &dgid,
834                                                    gid_type);
835                 if (IS_ERR(sgid_attr))
836                         return PTR_ERR(sgid_attr);
837
838                 flow_class = be32_to_cpu(grh->version_tclass_flow);
839                 rdma_move_grh_sgid_attr(ah_attr,
840                                         &sgid,
841                                         flow_class & 0xFFFFF,
842                                         hoplimit,
843                                         (flow_class >> 20) & 0xFF,
844                                         sgid_attr);
845
846                 ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
847                 if (ret)
848                         rdma_destroy_ah_attr(ah_attr);
849
850                 return ret;
851         } else {
852                 rdma_ah_set_dlid(ah_attr, wc->slid);
853                 rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
854
855                 if ((wc->wc_flags & IB_WC_GRH) == 0)
856                         return 0;
857
858                 if (dgid.global.interface_id !=
859                                         cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
860                         sgid_attr = rdma_find_gid_by_port(
861                                 device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
862                 } else
863                         sgid_attr = rdma_get_gid_attr(device, port_num, 0);
864
865                 if (IS_ERR(sgid_attr))
866                         return PTR_ERR(sgid_attr);
867                 flow_class = be32_to_cpu(grh->version_tclass_flow);
868                 rdma_move_grh_sgid_attr(ah_attr,
869                                         &sgid,
870                                         flow_class & 0xFFFFF,
871                                         hoplimit,
872                                         (flow_class >> 20) & 0xFF,
873                                         sgid_attr);
874
875                 return 0;
876         }
877 }
878 EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
879
880 /**
881  * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
882  * of the reference
883  *
884  * @attr:       Pointer to AH attribute structure
885  * @dgid:       Destination GID
886  * @flow_label: Flow label
887  * @hop_limit:  Hop limit
888  * @traffic_class: traffic class
889  * @sgid_attr:  Pointer to SGID attribute
890  *
891  * This takes ownership of the sgid_attr reference. The caller must ensure
892  * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
893  * calling this function.
894  */
895 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
896                              u32 flow_label, u8 hop_limit, u8 traffic_class,
897                              const struct ib_gid_attr *sgid_attr)
898 {
899         rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
900                         traffic_class);
901         attr->grh.sgid_attr = sgid_attr;
902 }
903 EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
904
905 /**
906  * rdma_destroy_ah_attr - Release reference to SGID attribute of
907  * ah attribute.
908  * @ah_attr: Pointer to ah attribute
909  *
910  * Release reference to the SGID attribute of the ah attribute if it is
911  * non NULL. It is safe to call this multiple times, and safe to call it on
912  * a zero initialized ah_attr.
913  */
914 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
915 {
916         if (ah_attr->grh.sgid_attr) {
917                 rdma_put_gid_attr(ah_attr->grh.sgid_attr);
918                 ah_attr->grh.sgid_attr = NULL;
919         }
920 }
921 EXPORT_SYMBOL(rdma_destroy_ah_attr);
922
923 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
924                                    const struct ib_grh *grh, u32 port_num)
925 {
926         struct rdma_ah_attr ah_attr;
927         struct ib_ah *ah;
928         int ret;
929
930         ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
931         if (ret)
932                 return ERR_PTR(ret);
933
934         ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
935
936         rdma_destroy_ah_attr(&ah_attr);
937         return ah;
938 }
939 EXPORT_SYMBOL(ib_create_ah_from_wc);
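
/*
 * Example: a minimal sketch of replying to a received UD datagram, which is
 * the typical use of ib_create_ah_from_wc() (the MAD layer replies this way).
 * The "reply_wr" and "qkey" names are illustrative only.
 *
 *	struct ib_ud_wr reply_wr = {};
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *
 *	reply_wr.ah = ah;
 *	reply_wr.remote_qpn = wc->src_qp;
 *	reply_wr.remote_qkey = qkey;		// the peer's qkey
 *	// ... fill reply_wr.wr, post it on a UD QP, and once the send
 *	// completes:
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */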
940
941 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
942 {
943         const struct ib_gid_attr *old_sgid_attr;
944         int ret;
945
946         if (ah->type != ah_attr->type)
947                 return -EINVAL;
948
949         ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
950         if (ret)
951                 return ret;
952
953         ret = ah->device->ops.modify_ah ?
954                 ah->device->ops.modify_ah(ah, ah_attr) :
955                 -EOPNOTSUPP;
956
957         ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
958         rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
959         return ret;
960 }
961 EXPORT_SYMBOL(rdma_modify_ah);
962
963 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
964 {
965         ah_attr->grh.sgid_attr = NULL;
966
967         return ah->device->ops.query_ah ?
968                 ah->device->ops.query_ah(ah, ah_attr) :
969                 -EOPNOTSUPP;
970 }
971 EXPORT_SYMBOL(rdma_query_ah);
972
973 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
974 {
975         const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
976         struct ib_pd *pd;
977         int ret;
978
979         might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
980
981         pd = ah->pd;
982
983         ret = ah->device->ops.destroy_ah(ah, flags);
984         if (ret)
985                 return ret;
986
987         atomic_dec(&pd->usecnt);
988         if (sgid_attr)
989                 rdma_put_gid_attr(sgid_attr);
990
991         kfree(ah);
992         return ret;
993 }
994 EXPORT_SYMBOL(rdma_destroy_ah_user);
995
996 /* Shared receive queues */
997
998 /**
999  * ib_create_srq_user - Creates a SRQ associated with the specified protection
1000  *   domain.
1001  * @pd: The protection domain associated with the SRQ.
1002  * @srq_init_attr: A list of initial attributes required to create the
1003  *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
1004  *   the actual capabilities of the created SRQ.
1005  * @uobject: uobject pointer if this is not a kernel SRQ
1006  * @udata: udata pointer if this is not a kernel SRQ
1007  *
1008  * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1009  * requested size of the SRQ, and set to the actual values allocated
1010  * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
1011  * will always be at least as large as the requested values.
1012  */
1013 struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
1014                                   struct ib_srq_init_attr *srq_init_attr,
1015                                   struct ib_usrq_object *uobject,
1016                                   struct ib_udata *udata)
1017 {
1018         struct ib_srq *srq;
1019         int ret;
1020
1021         srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
1022         if (!srq)
1023                 return ERR_PTR(-ENOMEM);
1024
1025         srq->device = pd->device;
1026         srq->pd = pd;
1027         srq->event_handler = srq_init_attr->event_handler;
1028         srq->srq_context = srq_init_attr->srq_context;
1029         srq->srq_type = srq_init_attr->srq_type;
1030         srq->uobject = uobject;
1031
1032         if (ib_srq_has_cq(srq->srq_type)) {
1033                 srq->ext.cq = srq_init_attr->ext.cq;
1034                 atomic_inc(&srq->ext.cq->usecnt);
1035         }
1036         if (srq->srq_type == IB_SRQT_XRC) {
1037                 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
1038                 if (srq->ext.xrc.xrcd)
1039                         atomic_inc(&srq->ext.xrc.xrcd->usecnt);
1040         }
1041         atomic_inc(&pd->usecnt);
1042
1043         rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ);
1044         rdma_restrack_parent_name(&srq->res, &pd->res);
1045
1046         ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
1047         if (ret) {
1048                 rdma_restrack_put(&srq->res);
1049                 atomic_dec(&srq->pd->usecnt);
1050                 if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
1051                         atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1052                 if (ib_srq_has_cq(srq->srq_type))
1053                         atomic_dec(&srq->ext.cq->usecnt);
1054                 kfree(srq);
1055                 return ERR_PTR(ret);
1056         }
1057
1058         rdma_restrack_add(&srq->res);
1059
1060         return srq;
1061 }
1062 EXPORT_SYMBOL(ib_create_srq_user);
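
/*
 * Example: a minimal sketch of a kernel ULP creating a basic (non-XRC) SRQ
 * through the ib_create_srq() wrapper, which passes NULL uobject/udata to the
 * function above. The max_wr/max_sge values are illustrative only.
 *
 *	struct ib_srq_init_attr init = {
 *		.attr = {
 *			.max_wr  = 256,
 *			.max_sge = 1,
 *		},
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &init);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	// init.attr now reflects the actually allocated capacities
 *	...
 *	ib_destroy_srq(srq);
 */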
1063
1064 int ib_modify_srq(struct ib_srq *srq,
1065                   struct ib_srq_attr *srq_attr,
1066                   enum ib_srq_attr_mask srq_attr_mask)
1067 {
1068         return srq->device->ops.modify_srq ?
1069                 srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
1070                                             NULL) : -EOPNOTSUPP;
1071 }
1072 EXPORT_SYMBOL(ib_modify_srq);
1073
1074 int ib_query_srq(struct ib_srq *srq,
1075                  struct ib_srq_attr *srq_attr)
1076 {
1077         return srq->device->ops.query_srq ?
1078                 srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
1079 }
1080 EXPORT_SYMBOL(ib_query_srq);
1081
1082 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
1083 {
1084         int ret;
1085
1086         if (atomic_read(&srq->usecnt))
1087                 return -EBUSY;
1088
1089         ret = srq->device->ops.destroy_srq(srq, udata);
1090         if (ret)
1091                 return ret;
1092
1093         atomic_dec(&srq->pd->usecnt);
1094         if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
1095                 atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1096         if (ib_srq_has_cq(srq->srq_type))
1097                 atomic_dec(&srq->ext.cq->usecnt);
1098         rdma_restrack_del(&srq->res);
1099         kfree(srq);
1100
1101         return ret;
1102 }
1103 EXPORT_SYMBOL(ib_destroy_srq_user);
1104
1105 /* Queue pairs */
1106
1107 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
1108 {
1109         struct ib_qp *qp = context;
1110         unsigned long flags;
1111
1112         spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
1113         list_for_each_entry(event->element.qp, &qp->open_list, open_list)
1114                 if (event->element.qp->event_handler)
1115                         event->element.qp->event_handler(event, event->element.qp->qp_context);
1116         spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
1117 }
1118
1119 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
1120                                   void (*event_handler)(struct ib_event *, void *),
1121                                   void *qp_context)
1122 {
1123         struct ib_qp *qp;
1124         unsigned long flags;
1125         int err;
1126
1127         qp = kzalloc(sizeof *qp, GFP_KERNEL);
1128         if (!qp)
1129                 return ERR_PTR(-ENOMEM);
1130
1131         qp->real_qp = real_qp;
1132         err = ib_open_shared_qp_security(qp, real_qp->device);
1133         if (err) {
1134                 kfree(qp);
1135                 return ERR_PTR(err);
1136         }
1137
1138         qp->real_qp = real_qp;
1139         atomic_inc(&real_qp->usecnt);
1140         qp->device = real_qp->device;
1141         qp->event_handler = event_handler;
1142         qp->qp_context = qp_context;
1143         qp->qp_num = real_qp->qp_num;
1144         qp->qp_type = real_qp->qp_type;
1145
1146         spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1147         list_add(&qp->open_list, &real_qp->open_list);
1148         spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1149
1150         return qp;
1151 }
1152
1153 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1154                          struct ib_qp_open_attr *qp_open_attr)
1155 {
1156         struct ib_qp *qp, *real_qp;
1157
1158         if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
1159                 return ERR_PTR(-EINVAL);
1160
1161         down_read(&xrcd->tgt_qps_rwsem);
1162         real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
1163         if (!real_qp) {
1164                 up_read(&xrcd->tgt_qps_rwsem);
1165                 return ERR_PTR(-EINVAL);
1166         }
1167         qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
1168                           qp_open_attr->qp_context);
1169         up_read(&xrcd->tgt_qps_rwsem);
1170         return qp;
1171 }
1172 EXPORT_SYMBOL(ib_open_qp);
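
/*
 * Example: a minimal sketch of opening an existing XRC target QP by number.
 * ib_open_qp() returns a thin ib_qp handle that shares the real QP; the
 * "my_event_handler" and "tgt_qp_num" names are illustrative only.
 *
 *	struct ib_qp_open_attr open_attr = {
 *		.event_handler = my_event_handler,	// optional
 *		.qp_num        = tgt_qp_num,		// number of the shared QP
 *		.qp_type       = IB_QPT_XRC_TGT,
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_open_qp(xrcd, &open_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	...
 *	ib_close_qp(qp);	// the real QP lives until its creator destroys it
 */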
1173
1174 static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
1175                                         struct ib_qp_init_attr *qp_init_attr)
1176 {
1177         struct ib_qp *real_qp = qp;
1178         int err;
1179
1180         qp->event_handler = __ib_shared_qp_event_handler;
1181         qp->qp_context = qp;
1182         qp->pd = NULL;
1183         qp->send_cq = qp->recv_cq = NULL;
1184         qp->srq = NULL;
1185         qp->xrcd = qp_init_attr->xrcd;
1186         atomic_inc(&qp_init_attr->xrcd->usecnt);
1187         INIT_LIST_HEAD(&qp->open_list);
1188
1189         qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
1190                           qp_init_attr->qp_context);
1191         if (IS_ERR(qp))
1192                 return qp;
1193
1194         err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
1195                               real_qp, GFP_KERNEL));
1196         if (err) {
1197                 ib_close_qp(qp);
1198                 return ERR_PTR(err);
1199         }
1200         return qp;
1201 }
1202
1203 static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
1204                                struct ib_qp_init_attr *attr,
1205                                struct ib_udata *udata,
1206                                struct ib_uqp_object *uobj, const char *caller)
1207 {
1208         struct ib_udata dummy = {};
1209         struct ib_qp *qp;
1210         int ret;
1211
1212         if (!dev->ops.create_qp)
1213                 return ERR_PTR(-EOPNOTSUPP);
1214
1215         qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
1216         if (!qp)
1217                 return ERR_PTR(-ENOMEM);
1218
1219         qp->device = dev;
1220         qp->pd = pd;
1221         qp->uobject = uobj;
1222         qp->real_qp = qp;
1223
1224         qp->qp_type = attr->qp_type;
1225         qp->rwq_ind_tbl = attr->rwq_ind_tbl;
1226         qp->srq = attr->srq;
1227         qp->event_handler = attr->event_handler;
1228         qp->port = attr->port_num;
1229         qp->qp_context = attr->qp_context;
1230
1231         spin_lock_init(&qp->mr_lock);
1232         INIT_LIST_HEAD(&qp->rdma_mrs);
1233         INIT_LIST_HEAD(&qp->sig_mrs);
1234
1235         rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
1236         WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
1237         rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
1238         ret = dev->ops.create_qp(qp, attr, udata);
1239         if (ret)
1240                 goto err_create;
1241
1242         /*
1243          * TODO: The mlx4 internally overwrites send_cq and recv_cq.
1244          * Unfortunately, it is not an easy task to fix that driver.
1245          */
1246         qp->send_cq = attr->send_cq;
1247         qp->recv_cq = attr->recv_cq;
1248
1249         ret = ib_create_qp_security(qp, dev);
1250         if (ret)
1251                 goto err_security;
1252
1253         rdma_restrack_add(&qp->res);
1254         return qp;
1255
1256 err_security:
1257         qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL);
1258 err_create:
1259         rdma_restrack_put(&qp->res);
1260         kfree(qp);
1261         return ERR_PTR(ret);
1262
1263 }
1264
1265 /**
1266  * ib_create_qp_user - Creates a QP associated with the specified protection
1267  *   domain.
1268  * @dev: IB device
1269  * @pd: The protection domain associated with the QP.
1270  * @attr: A list of initial attributes required to create the
1271  *   QP.  If QP creation succeeds, then the attributes are updated to
1272  *   the actual capabilities of the created QP.
1273  * @udata: User data
1274  * @uobj: uverbs object
1275  * @caller: caller's build-time module name
1276  */
1277 struct ib_qp *ib_create_qp_user(struct ib_device *dev, struct ib_pd *pd,
1278                                 struct ib_qp_init_attr *attr,
1279                                 struct ib_udata *udata,
1280                                 struct ib_uqp_object *uobj, const char *caller)
1281 {
1282         struct ib_qp *qp, *xrc_qp;
1283
1284         if (attr->qp_type == IB_QPT_XRC_TGT)
1285                 qp = create_qp(dev, pd, attr, NULL, NULL, caller);
1286         else
1287                 qp = create_qp(dev, pd, attr, udata, uobj, NULL);
1288         if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp))
1289                 return qp;
1290
1291         xrc_qp = create_xrc_qp_user(qp, attr);
1292         if (IS_ERR(xrc_qp)) {
1293                 ib_destroy_qp(qp);
1294                 return xrc_qp;
1295         }
1296
1297         xrc_qp->uobject = uobj;
1298         return xrc_qp;
1299 }
1300 EXPORT_SYMBOL(ib_create_qp_user);
1301
1302 void ib_qp_usecnt_inc(struct ib_qp *qp)
1303 {
1304         if (qp->pd)
1305                 atomic_inc(&qp->pd->usecnt);
1306         if (qp->send_cq)
1307                 atomic_inc(&qp->send_cq->usecnt);
1308         if (qp->recv_cq)
1309                 atomic_inc(&qp->recv_cq->usecnt);
1310         if (qp->srq)
1311                 atomic_inc(&qp->srq->usecnt);
1312         if (qp->rwq_ind_tbl)
1313                 atomic_inc(&qp->rwq_ind_tbl->usecnt);
1314 }
1315 EXPORT_SYMBOL(ib_qp_usecnt_inc);
1316
1317 void ib_qp_usecnt_dec(struct ib_qp *qp)
1318 {
1319         if (qp->rwq_ind_tbl)
1320                 atomic_dec(&qp->rwq_ind_tbl->usecnt);
1321         if (qp->srq)
1322                 atomic_dec(&qp->srq->usecnt);
1323         if (qp->recv_cq)
1324                 atomic_dec(&qp->recv_cq->usecnt);
1325         if (qp->send_cq)
1326                 atomic_dec(&qp->send_cq->usecnt);
1327         if (qp->pd)
1328                 atomic_dec(&qp->pd->usecnt);
1329 }
1330 EXPORT_SYMBOL(ib_qp_usecnt_dec);
1331
1332 struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
1333                                   struct ib_qp_init_attr *qp_init_attr,
1334                                   const char *caller)
1335 {
1336         struct ib_device *device = pd->device;
1337         struct ib_qp *qp;
1338         int ret;
1339
1340         /*
1341          * If the caller is using the RDMA API, calculate the resources
1342          * needed for the RDMA READ/WRITE operations.
1343          *
1344          * Note that these callers need to pass in a port number.
1345          */
1346         if (qp_init_attr->cap.max_rdma_ctxs)
1347                 rdma_rw_init_qp(device, qp_init_attr);
1348
1349         qp = create_qp(device, pd, qp_init_attr, NULL, NULL, caller);
1350         if (IS_ERR(qp))
1351                 return qp;
1352
1353         ib_qp_usecnt_inc(qp);
1354
1355         if (qp_init_attr->cap.max_rdma_ctxs) {
1356                 ret = rdma_rw_init_mrs(qp, qp_init_attr);
1357                 if (ret)
1358                         goto err;
1359         }
1360
1361         /*
1362          * Note: all hw drivers guarantee that max_send_sge is lower than
1363          * the device RDMA WRITE SGE limit but not all hw drivers ensure that
1364          * max_send_sge <= max_sge_rd.
1365          */
1366         qp->max_write_sge = qp_init_attr->cap.max_send_sge;
1367         qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
1368                                  device->attrs.max_sge_rd);
1369         if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
1370                 qp->integrity_en = true;
1371
1372         return qp;
1373
1374 err:
1375         ib_destroy_qp(qp);
1376         return ERR_PTR(ret);
1377
1378 }
1379 EXPORT_SYMBOL(ib_create_qp_kernel);
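
/*
 * Example: a minimal sketch of a kernel ULP creating an RC QP through the
 * ib_create_qp() wrapper (which calls ib_create_qp_kernel() with the caller's
 * module name). The queue sizes and capabilities are illustrative only.
 *
 *	struct ib_qp_init_attr qp_attr = {
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp;
 *
 *	qp = ib_create_qp(pd, &qp_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	// qp_attr.cap now holds the actual capabilities; transition the QP
 *	// through INIT/RTR/RTS with ib_modify_qp() before use.
 *	...
 *	ib_destroy_qp(qp);
 */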
1380
1381 static const struct {
1382         int                     valid;
1383         enum ib_qp_attr_mask    req_param[IB_QPT_MAX];
1384         enum ib_qp_attr_mask    opt_param[IB_QPT_MAX];
1385 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
1386         [IB_QPS_RESET] = {
1387                 [IB_QPS_RESET] = { .valid = 1 },
1388                 [IB_QPS_INIT]  = {
1389                         .valid = 1,
1390                         .req_param = {
1391                                 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
1392                                                 IB_QP_PORT                      |
1393                                                 IB_QP_QKEY),
1394                                 [IB_QPT_RAW_PACKET] = IB_QP_PORT,
1395                                 [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
1396                                                 IB_QP_PORT                      |
1397                                                 IB_QP_ACCESS_FLAGS),
1398                                 [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
1399                                                 IB_QP_PORT                      |
1400                                                 IB_QP_ACCESS_FLAGS),
1401                                 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX            |
1402                                                 IB_QP_PORT                      |
1403                                                 IB_QP_ACCESS_FLAGS),
1404                                 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX            |
1405                                                 IB_QP_PORT                      |
1406                                                 IB_QP_ACCESS_FLAGS),
1407                                 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
1408                                                 IB_QP_QKEY),
1409                                 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
1410                                                 IB_QP_QKEY),
1411                         }
1412                 },
1413         },
1414         [IB_QPS_INIT]  = {
1415                 [IB_QPS_RESET] = { .valid = 1 },
1416                 [IB_QPS_ERR] =   { .valid = 1 },
1417                 [IB_QPS_INIT]  = {
1418                         .valid = 1,
1419                         .opt_param = {
1420                                 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
1421                                                 IB_QP_PORT                      |
1422                                                 IB_QP_QKEY),
1423                                 [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
1424                                                 IB_QP_PORT                      |
1425                                                 IB_QP_ACCESS_FLAGS),
1426                                 [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
1427                                                 IB_QP_PORT                      |
1428                                                 IB_QP_ACCESS_FLAGS),
1429                                 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX            |
1430                                                 IB_QP_PORT                      |
1431                                                 IB_QP_ACCESS_FLAGS),
1432                                 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX            |
1433                                                 IB_QP_PORT                      |
1434                                                 IB_QP_ACCESS_FLAGS),
1435                                 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
1436                                                 IB_QP_QKEY),
1437                                 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
1438                                                 IB_QP_QKEY),
1439                         }
1440                 },
1441                 [IB_QPS_RTR]   = {
1442                         .valid = 1,
1443                         .req_param = {
1444                                 [IB_QPT_UC]  = (IB_QP_AV                        |
1445                                                 IB_QP_PATH_MTU                  |
1446                                                 IB_QP_DEST_QPN                  |
1447                                                 IB_QP_RQ_PSN),
1448                                 [IB_QPT_RC]  = (IB_QP_AV                        |
1449                                                 IB_QP_PATH_MTU                  |
1450                                                 IB_QP_DEST_QPN                  |
1451                                                 IB_QP_RQ_PSN                    |
1452                                                 IB_QP_MAX_DEST_RD_ATOMIC        |
1453                                                 IB_QP_MIN_RNR_TIMER),
1454                                 [IB_QPT_XRC_INI] = (IB_QP_AV                    |
1455                                                 IB_QP_PATH_MTU                  |
1456                                                 IB_QP_DEST_QPN                  |
1457                                                 IB_QP_RQ_PSN),
1458                                 [IB_QPT_XRC_TGT] = (IB_QP_AV                    |
1459                                                 IB_QP_PATH_MTU                  |
1460                                                 IB_QP_DEST_QPN                  |
1461                                                 IB_QP_RQ_PSN                    |
1462                                                 IB_QP_MAX_DEST_RD_ATOMIC        |
1463                                                 IB_QP_MIN_RNR_TIMER),
1464                         },
1465                         .opt_param = {
1466                                  [IB_QPT_UD]  = (IB_QP_PKEY_INDEX               |
1467                                                  IB_QP_QKEY),
1468                                  [IB_QPT_UC]  = (IB_QP_ALT_PATH                 |
1469                                                  IB_QP_ACCESS_FLAGS             |
1470                                                  IB_QP_PKEY_INDEX),
1471                                  [IB_QPT_RC]  = (IB_QP_ALT_PATH                 |
1472                                                  IB_QP_ACCESS_FLAGS             |
1473                                                  IB_QP_PKEY_INDEX),
1474                                  [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH             |
1475                                                  IB_QP_ACCESS_FLAGS             |
1476                                                  IB_QP_PKEY_INDEX),
1477                                  [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH             |
1478                                                  IB_QP_ACCESS_FLAGS             |
1479                                                  IB_QP_PKEY_INDEX),
1480                                  [IB_QPT_SMI] = (IB_QP_PKEY_INDEX               |
1481                                                  IB_QP_QKEY),
1482                                  [IB_QPT_GSI] = (IB_QP_PKEY_INDEX               |
1483                                                  IB_QP_QKEY),
1484                          },
1485                 },
1486         },
1487         [IB_QPS_RTR]   = {
1488                 [IB_QPS_RESET] = { .valid = 1 },
1489                 [IB_QPS_ERR] =   { .valid = 1 },
1490                 [IB_QPS_RTS]   = {
1491                         .valid = 1,
1492                         .req_param = {
1493                                 [IB_QPT_UD]  = IB_QP_SQ_PSN,
1494                                 [IB_QPT_UC]  = IB_QP_SQ_PSN,
1495                                 [IB_QPT_RC]  = (IB_QP_TIMEOUT                   |
1496                                                 IB_QP_RETRY_CNT                 |
1497                                                 IB_QP_RNR_RETRY                 |
1498                                                 IB_QP_SQ_PSN                    |
1499                                                 IB_QP_MAX_QP_RD_ATOMIC),
1500                                 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT               |
1501                                                 IB_QP_RETRY_CNT                 |
1502                                                 IB_QP_RNR_RETRY                 |
1503                                                 IB_QP_SQ_PSN                    |
1504                                                 IB_QP_MAX_QP_RD_ATOMIC),
1505                                 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT               |
1506                                                 IB_QP_SQ_PSN),
1507                                 [IB_QPT_SMI] = IB_QP_SQ_PSN,
1508                                 [IB_QPT_GSI] = IB_QP_SQ_PSN,
1509                         },
1510                         .opt_param = {
1511                                  [IB_QPT_UD]  = (IB_QP_CUR_STATE                |
1512                                                  IB_QP_QKEY),
1513                                  [IB_QPT_UC]  = (IB_QP_CUR_STATE                |
1514                                                  IB_QP_ALT_PATH                 |
1515                                                  IB_QP_ACCESS_FLAGS             |
1516                                                  IB_QP_PATH_MIG_STATE),
1517                                  [IB_QPT_RC]  = (IB_QP_CUR_STATE                |
1518                                                  IB_QP_ALT_PATH                 |
1519                                                  IB_QP_ACCESS_FLAGS             |
1520                                                  IB_QP_MIN_RNR_TIMER            |
1521                                                  IB_QP_PATH_MIG_STATE),
1522                                  [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE            |
1523                                                  IB_QP_ALT_PATH                 |
1524                                                  IB_QP_ACCESS_FLAGS             |
1525                                                  IB_QP_PATH_MIG_STATE),
1526                                  [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE            |
1527                                                  IB_QP_ALT_PATH                 |
1528                                                  IB_QP_ACCESS_FLAGS             |
1529                                                  IB_QP_MIN_RNR_TIMER            |
1530                                                  IB_QP_PATH_MIG_STATE),
1531                                  [IB_QPT_SMI] = (IB_QP_CUR_STATE                |
1532                                                  IB_QP_QKEY),
1533                                  [IB_QPT_GSI] = (IB_QP_CUR_STATE                |
1534                                                  IB_QP_QKEY),
1535                                  [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1536                          }
1537                 }
1538         },
1539         [IB_QPS_RTS]   = {
1540                 [IB_QPS_RESET] = { .valid = 1 },
1541                 [IB_QPS_ERR] =   { .valid = 1 },
1542                 [IB_QPS_RTS]   = {
1543                         .valid = 1,
1544                         .opt_param = {
1545                                 [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
1546                                                 IB_QP_QKEY),
1547                                 [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
1548                                                 IB_QP_ACCESS_FLAGS              |
1549                                                 IB_QP_ALT_PATH                  |
1550                                                 IB_QP_PATH_MIG_STATE),
1551                                 [IB_QPT_RC]  = (IB_QP_CUR_STATE                 |
1552                                                 IB_QP_ACCESS_FLAGS              |
1553                                                 IB_QP_ALT_PATH                  |
1554                                                 IB_QP_PATH_MIG_STATE            |
1555                                                 IB_QP_MIN_RNR_TIMER),
1556                                 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE             |
1557                                                 IB_QP_ACCESS_FLAGS              |
1558                                                 IB_QP_ALT_PATH                  |
1559                                                 IB_QP_PATH_MIG_STATE),
1560                                 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE             |
1561                                                 IB_QP_ACCESS_FLAGS              |
1562                                                 IB_QP_ALT_PATH                  |
1563                                                 IB_QP_PATH_MIG_STATE            |
1564                                                 IB_QP_MIN_RNR_TIMER),
1565                                 [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
1566                                                 IB_QP_QKEY),
1567                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
1568                                                 IB_QP_QKEY),
1569                                 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1570                         }
1571                 },
1572                 [IB_QPS_SQD]   = {
1573                         .valid = 1,
1574                         .opt_param = {
1575                                 [IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1576                                 [IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1577                                 [IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
1578                                 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1579                                 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
1580                                 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1581                                 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1582                         }
1583                 },
1584         },
1585         [IB_QPS_SQD]   = {
1586                 [IB_QPS_RESET] = { .valid = 1 },
1587                 [IB_QPS_ERR] =   { .valid = 1 },
1588                 [IB_QPS_RTS]   = {
1589                         .valid = 1,
1590                         .opt_param = {
1591                                 [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
1592                                                 IB_QP_QKEY),
1593                                 [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
1594                                                 IB_QP_ALT_PATH                  |
1595                                                 IB_QP_ACCESS_FLAGS              |
1596                                                 IB_QP_PATH_MIG_STATE),
1597                                 [IB_QPT_RC]  = (IB_QP_CUR_STATE                 |
1598                                                 IB_QP_ALT_PATH                  |
1599                                                 IB_QP_ACCESS_FLAGS              |
1600                                                 IB_QP_MIN_RNR_TIMER             |
1601                                                 IB_QP_PATH_MIG_STATE),
1602                                 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE             |
1603                                                 IB_QP_ALT_PATH                  |
1604                                                 IB_QP_ACCESS_FLAGS              |
1605                                                 IB_QP_PATH_MIG_STATE),
1606                                 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE             |
1607                                                 IB_QP_ALT_PATH                  |
1608                                                 IB_QP_ACCESS_FLAGS              |
1609                                                 IB_QP_MIN_RNR_TIMER             |
1610                                                 IB_QP_PATH_MIG_STATE),
1611                                 [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
1612                                                 IB_QP_QKEY),
1613                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
1614                                                 IB_QP_QKEY),
1615                         }
1616                 },
1617                 [IB_QPS_SQD]   = {
1618                         .valid = 1,
1619                         .opt_param = {
1620                                 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
1621                                                 IB_QP_QKEY),
1622                                 [IB_QPT_UC]  = (IB_QP_AV                        |
1623                                                 IB_QP_ALT_PATH                  |
1624                                                 IB_QP_ACCESS_FLAGS              |
1625                                                 IB_QP_PKEY_INDEX                |
1626                                                 IB_QP_PATH_MIG_STATE),
1627                                 [IB_QPT_RC]  = (IB_QP_PORT                      |
1628                                                 IB_QP_AV                        |
1629                                                 IB_QP_TIMEOUT                   |
1630                                                 IB_QP_RETRY_CNT                 |
1631                                                 IB_QP_RNR_RETRY                 |
1632                                                 IB_QP_MAX_QP_RD_ATOMIC          |
1633                                                 IB_QP_MAX_DEST_RD_ATOMIC        |
1634                                                 IB_QP_ALT_PATH                  |
1635                                                 IB_QP_ACCESS_FLAGS              |
1636                                                 IB_QP_PKEY_INDEX                |
1637                                                 IB_QP_MIN_RNR_TIMER             |
1638                                                 IB_QP_PATH_MIG_STATE),
1639                                 [IB_QPT_XRC_INI] = (IB_QP_PORT                  |
1640                                                 IB_QP_AV                        |
1641                                                 IB_QP_TIMEOUT                   |
1642                                                 IB_QP_RETRY_CNT                 |
1643                                                 IB_QP_RNR_RETRY                 |
1644                                                 IB_QP_MAX_QP_RD_ATOMIC          |
1645                                                 IB_QP_ALT_PATH                  |
1646                                                 IB_QP_ACCESS_FLAGS              |
1647                                                 IB_QP_PKEY_INDEX                |
1648                                                 IB_QP_PATH_MIG_STATE),
1649                                 [IB_QPT_XRC_TGT] = (IB_QP_PORT                  |
1650                                                 IB_QP_AV                        |
1651                                                 IB_QP_TIMEOUT                   |
1652                                                 IB_QP_MAX_DEST_RD_ATOMIC        |
1653                                                 IB_QP_ALT_PATH                  |
1654                                                 IB_QP_ACCESS_FLAGS              |
1655                                                 IB_QP_PKEY_INDEX                |
1656                                                 IB_QP_MIN_RNR_TIMER             |
1657                                                 IB_QP_PATH_MIG_STATE),
1658                                 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
1659                                                 IB_QP_QKEY),
1660                                 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
1661                                                 IB_QP_QKEY),
1662                         }
1663                 }
1664         },
1665         [IB_QPS_SQE]   = {
1666                 [IB_QPS_RESET] = { .valid = 1 },
1667                 [IB_QPS_ERR] =   { .valid = 1 },
1668                 [IB_QPS_RTS]   = {
1669                         .valid = 1,
1670                         .opt_param = {
1671                                 [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
1672                                                 IB_QP_QKEY),
1673                                 [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
1674                                                 IB_QP_ACCESS_FLAGS),
1675                                 [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
1676                                                 IB_QP_QKEY),
1677                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
1678                                                 IB_QP_QKEY),
1679                         }
1680                 }
1681         },
1682         [IB_QPS_ERR] = {
1683                 [IB_QPS_RESET] = { .valid = 1 },
1684                 [IB_QPS_ERR] =   { .valid = 1 }
1685         }
1686 };
1687
1688 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1689                         enum ib_qp_type type, enum ib_qp_attr_mask mask)
1690 {
1691         enum ib_qp_attr_mask req_param, opt_param;
1692
1693         if (mask & IB_QP_CUR_STATE  &&
1694             cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1695             cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1696                 return false;
1697
1698         if (!qp_state_table[cur_state][next_state].valid)
1699                 return false;
1700
1701         req_param = qp_state_table[cur_state][next_state].req_param[type];
1702         opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1703
1704         if ((mask & req_param) != req_param)
1705                 return false;
1706
1707         if (mask & ~(req_param | opt_param | IB_QP_STATE))
1708                 return false;
1709
1710         return true;
1711 }
1712 EXPORT_SYMBOL(ib_modify_qp_is_ok);
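
/*
 * Illustrative sketch only (not part of the verbs implementation): a kernel
 * ULP can pre-validate its attribute mask against the state table above
 * before issuing a modify. The helper name and the chosen transition are
 * hypothetical; only ib_modify_qp_is_ok() is real.
 */
static inline bool example_rc_reset_to_init_mask_ok(int attr_mask)
{
	/* RESET->INIT on an RC QP requires pkey index, port and access flags. */
	return ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
				  attr_mask);
}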
1713
1714 /**
1715  * ib_resolve_eth_dmac - Resolve destination mac address
1716  * @device:             Device to consider
1717  * @ah_attr:            address handle attribute which describes the
1718  *                      source and destination parameters
1719  * ib_resolve_eth_dmac() resolves the destination mac address and L3 hop limit.
1720  * It returns 0 on success or an appropriate error code and initializes the
1721  * necessary ah_attr fields when the call is successful.
1722  */
1723 static int ib_resolve_eth_dmac(struct ib_device *device,
1724                                struct rdma_ah_attr *ah_attr)
1725 {
1726         int ret = 0;
1727
1728         if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1729                 if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1730                         __be32 addr = 0;
1731
1732                         memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
1733                         ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
1734                 } else {
1735                         ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
1736                                         (char *)ah_attr->roce.dmac);
1737                 }
1738         } else {
1739                 ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1740         }
1741         return ret;
1742 }
1743
1744 static bool is_qp_type_connected(const struct ib_qp *qp)
1745 {
1746         return (qp->qp_type == IB_QPT_UC ||
1747                 qp->qp_type == IB_QPT_RC ||
1748                 qp->qp_type == IB_QPT_XRC_INI ||
1749                 qp->qp_type == IB_QPT_XRC_TGT);
1750 }
1751
1752 /*
1753  * IB core internal function to perform QP attribute modification.
1754  */
1755 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1756                          int attr_mask, struct ib_udata *udata)
1757 {
1758         u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1759         const struct ib_gid_attr *old_sgid_attr_av;
1760         const struct ib_gid_attr *old_sgid_attr_alt_av;
1761         int ret;
1762
1763         attr->xmit_slave = NULL;
1764         if (attr_mask & IB_QP_AV) {
1765                 ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
1766                                           &old_sgid_attr_av);
1767                 if (ret)
1768                         return ret;
1769
1770                 if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
1771                     is_qp_type_connected(qp)) {
1772                         struct net_device *slave;
1773
1774                         /*
1775                          * If the user provided the qp_attr then we have to
1776                          * resolve it. Kernel users have to provide already
1777                          * resolved rdma_ah_attr's.
1778                          */
1779                         if (udata) {
1780                                 ret = ib_resolve_eth_dmac(qp->device,
1781                                                           &attr->ah_attr);
1782                                 if (ret)
1783                                         goto out_av;
1784                         }
1785                         slave = rdma_lag_get_ah_roce_slave(qp->device,
1786                                                            &attr->ah_attr,
1787                                                            GFP_KERNEL);
1788                         if (IS_ERR(slave)) {
1789                                 ret = PTR_ERR(slave);
1790                                 goto out_av;
1791                         }
1792                         attr->xmit_slave = slave;
1793                 }
1794         }
1795         if (attr_mask & IB_QP_ALT_PATH) {
1796                 /*
1797                  * FIXME: This does not track the migration state, so if the
1798                  * user loads a new alternate path after the HW has migrated
1799                  * from primary->alternate we will keep the wrong
1800                  * references. This is OK for IB because the reference
1801                  * counting does not serve any functional purpose.
1802                  */
1803                 ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
1804                                           &old_sgid_attr_alt_av);
1805                 if (ret)
1806                         goto out_av;
1807
1808                 /*
1809                  * Today the core code can only handle alternate paths and APM
1810                  * for IB. Ban them in roce mode.
1811                  */
1812                 if (!(rdma_protocol_ib(qp->device,
1813                                        attr->alt_ah_attr.port_num) &&
1814                       rdma_protocol_ib(qp->device, port))) {
1815                         ret = -EINVAL;
1816                         goto out;
1817                 }
1818         }
1819
1820         if (rdma_ib_or_roce(qp->device, port)) {
1821                 if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
1822                         dev_warn(&qp->device->dev,
1823                                  "%s rq_psn overflow, masking to 24 bits\n",
1824                                  __func__);
1825                         attr->rq_psn &= 0xffffff;
1826                 }
1827
1828                 if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
1829                         dev_warn(&qp->device->dev,
1830                                  "%s sq_psn overflow, masking to 24 bits\n",
1831                                  __func__);
1832                         attr->sq_psn &= 0xffffff;
1833                 }
1834         }
1835
1836         /*
1837          * Bind this qp to a counter automatically based on the rdma counter
1838          * rules. This is only done in RST2INIT with the port specified.
1839          */
1840         if (!qp->counter && (attr_mask & IB_QP_PORT) &&
1841             ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
1842                 rdma_counter_bind_qp_auto(qp, attr->port_num);
1843
1844         ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1845         if (ret)
1846                 goto out;
1847
1848         if (attr_mask & IB_QP_PORT)
1849                 qp->port = attr->port_num;
1850         if (attr_mask & IB_QP_AV)
1851                 qp->av_sgid_attr =
1852                         rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
1853         if (attr_mask & IB_QP_ALT_PATH)
1854                 qp->alt_path_sgid_attr = rdma_update_sgid_attr(
1855                         &attr->alt_ah_attr, qp->alt_path_sgid_attr);
1856
1857 out:
1858         if (attr_mask & IB_QP_ALT_PATH)
1859                 rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
1860 out_av:
1861         if (attr_mask & IB_QP_AV) {
1862                 rdma_lag_put_ah_roce_slave(attr->xmit_slave);
1863                 rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
1864         }
1865         return ret;
1866 }
1867
1868 /**
1869  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1870  * @ib_qp: The QP to modify.
1871  * @attr: On input, specifies the QP attributes to modify.  On output,
1872  *   the current values of selected QP attributes are returned.
1873  * @attr_mask: A bit-mask used to specify which attributes of the QP
1874  *   are being modified.
1875  * @udata: pointer to the user's input/output buffer information
1876  *
1877  * It returns 0 on success and an appropriate error code on error.
1878  */
1879 int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1880                             int attr_mask, struct ib_udata *udata)
1881 {
1882         return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
1883 }
1884 EXPORT_SYMBOL(ib_modify_qp_with_udata);
1885
1886 int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
1887 {
1888         int rc;
1889         u32 netdev_speed;
1890         struct net_device *netdev;
1891         struct ethtool_link_ksettings lksettings;
1892
1893         if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1894                 return -EINVAL;
1895
1896         netdev = ib_device_get_netdev(dev, port_num);
1897         if (!netdev)
1898                 return -ENODEV;
1899
1900         rtnl_lock();
1901         rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1902         rtnl_unlock();
1903
1904         dev_put(netdev);
1905         if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
1906                 netdev_speed = lksettings.base.speed;
1907         } else {
1908                 netdev_speed = SPEED_1000;
1909                 pr_warn("%s speed is unknown, defaulting to %u\n", netdev->name,
1910                         netdev_speed);
1911         }
1912
1913         dev_put(netdev);
1914         if (netdev_speed <= SPEED_1000) {
1915                 *width = IB_WIDTH_1X;
1916                 *speed = IB_SPEED_SDR;
1917         } else if (netdev_speed <= SPEED_10000) {
1918                 *width = IB_WIDTH_1X;
1919                 *speed = IB_SPEED_FDR10;
1920         } else if (netdev_speed <= SPEED_20000) {
1921                 *width = IB_WIDTH_4X;
1922                 *speed = IB_SPEED_DDR;
1923         } else if (netdev_speed <= SPEED_25000) {
1924                 *width = IB_WIDTH_1X;
1925                 *speed = IB_SPEED_EDR;
1926         } else if (netdev_speed <= SPEED_40000) {
1927                 *width = IB_WIDTH_4X;
1928                 *speed = IB_SPEED_FDR10;
1929         } else {
1930                 *width = IB_WIDTH_4X;
1931                 *speed = IB_SPEED_EDR;
1932         }
1933
1934         return 0;
1935 }
1936 EXPORT_SYMBOL(ib_get_eth_speed);
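
/*
 * Illustrative sketch only: a RoCE driver's query_port() handler could fill
 * in the active speed/width pair from the underlying netdev with
 * ib_get_eth_speed(). The helper name is hypothetical.
 */
static inline int example_fill_eth_port_speed(struct ib_device *ibdev, u32 port,
					      struct ib_port_attr *props)
{
	return ib_get_eth_speed(ibdev, port, &props->active_speed,
				&props->active_width);
}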
1937
1938 int ib_modify_qp(struct ib_qp *qp,
1939                  struct ib_qp_attr *qp_attr,
1940                  int qp_attr_mask)
1941 {
1942         return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
1943 }
1944 EXPORT_SYMBOL(ib_modify_qp);
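
/*
 * Illustrative sketch only: how a kernel consumer might drive an RC QP
 * through the RESET->INIT transition with ib_modify_qp(). The helper name,
 * the zero pkey index and the access flags are hypothetical placeholders.
 */
static inline int example_qp_reset_to_init(struct ib_qp *qp, u32 port)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};

	/* The mask must include every req_param bit from qp_state_table. */
	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
			    IB_QP_ACCESS_FLAGS);
}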
1945
1946 int ib_query_qp(struct ib_qp *qp,
1947                 struct ib_qp_attr *qp_attr,
1948                 int qp_attr_mask,
1949                 struct ib_qp_init_attr *qp_init_attr)
1950 {
1951         qp_attr->ah_attr.grh.sgid_attr = NULL;
1952         qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
1953
1954         return qp->device->ops.query_qp ?
1955                 qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
1956                                          qp_init_attr) : -EOPNOTSUPP;
1957 }
1958 EXPORT_SYMBOL(ib_query_qp);
1959
1960 int ib_close_qp(struct ib_qp *qp)
1961 {
1962         struct ib_qp *real_qp;
1963         unsigned long flags;
1964
1965         real_qp = qp->real_qp;
1966         if (real_qp == qp)
1967                 return -EINVAL;
1968
1969         spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1970         list_del(&qp->open_list);
1971         spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1972
1973         atomic_dec(&real_qp->usecnt);
1974         if (qp->qp_sec)
1975                 ib_close_shared_qp_security(qp->qp_sec);
1976         kfree(qp);
1977
1978         return 0;
1979 }
1980 EXPORT_SYMBOL(ib_close_qp);
1981
1982 static int __ib_destroy_shared_qp(struct ib_qp *qp)
1983 {
1984         struct ib_xrcd *xrcd;
1985         struct ib_qp *real_qp;
1986         int ret;
1987
1988         real_qp = qp->real_qp;
1989         xrcd = real_qp->xrcd;
1990         down_write(&xrcd->tgt_qps_rwsem);
1991         ib_close_qp(qp);
1992         if (atomic_read(&real_qp->usecnt) == 0)
1993                 xa_erase(&xrcd->tgt_qps, real_qp->qp_num);
1994         else
1995                 real_qp = NULL;
1996         up_write(&xrcd->tgt_qps_rwsem);
1997
1998         if (real_qp) {
1999                 ret = ib_destroy_qp(real_qp);
2000                 if (!ret)
2001                         atomic_dec(&xrcd->usecnt);
2002         }
2003
2004         return 0;
2005 }
2006
2007 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
2008 {
2009         const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
2010         const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
2011         struct ib_qp_security *sec;
2012         int ret;
2013
2014         WARN_ON_ONCE(qp->mrs_used > 0);
2015
2016         if (atomic_read(&qp->usecnt))
2017                 return -EBUSY;
2018
2019         if (qp->real_qp != qp)
2020                 return __ib_destroy_shared_qp(qp);
2021
2022         sec  = qp->qp_sec;
2023         if (sec)
2024                 ib_destroy_qp_security_begin(sec);
2025
2026         if (!qp->uobject)
2027                 rdma_rw_cleanup_mrs(qp);
2028
2029         rdma_counter_unbind_qp(qp, true);
2030         ret = qp->device->ops.destroy_qp(qp, udata);
2031         if (ret) {
2032                 if (sec)
2033                         ib_destroy_qp_security_abort(sec);
2034                 return ret;
2035         }
2036
2037         if (alt_path_sgid_attr)
2038                 rdma_put_gid_attr(alt_path_sgid_attr);
2039         if (av_sgid_attr)
2040                 rdma_put_gid_attr(av_sgid_attr);
2041
2042         ib_qp_usecnt_dec(qp);
2043         if (sec)
2044                 ib_destroy_qp_security_end(sec);
2045
2046         rdma_restrack_del(&qp->res);
2047         kfree(qp);
2048         return ret;
2049 }
2050 EXPORT_SYMBOL(ib_destroy_qp_user);
2051
2052 /* Completion queues */
2053
2054 struct ib_cq *__ib_create_cq(struct ib_device *device,
2055                              ib_comp_handler comp_handler,
2056                              void (*event_handler)(struct ib_event *, void *),
2057                              void *cq_context,
2058                              const struct ib_cq_init_attr *cq_attr,
2059                              const char *caller)
2060 {
2061         struct ib_cq *cq;
2062         int ret;
2063
2064         cq = rdma_zalloc_drv_obj(device, ib_cq);
2065         if (!cq)
2066                 return ERR_PTR(-ENOMEM);
2067
2068         cq->device = device;
2069         cq->uobject = NULL;
2070         cq->comp_handler = comp_handler;
2071         cq->event_handler = event_handler;
2072         cq->cq_context = cq_context;
2073         atomic_set(&cq->usecnt, 0);
2074
2075         rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
2076         rdma_restrack_set_name(&cq->res, caller);
2077
2078         ret = device->ops.create_cq(cq, cq_attr, NULL);
2079         if (ret) {
2080                 rdma_restrack_put(&cq->res);
2081                 kfree(cq);
2082                 return ERR_PTR(ret);
2083         }
2084
2085         rdma_restrack_add(&cq->res);
2086         return cq;
2087 }
2088 EXPORT_SYMBOL(__ib_create_cq);
2089
2090 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2091 {
2092         if (cq->shared)
2093                 return -EOPNOTSUPP;
2094
2095         return cq->device->ops.modify_cq ?
2096                 cq->device->ops.modify_cq(cq, cq_count,
2097                                           cq_period) : -EOPNOTSUPP;
2098 }
2099 EXPORT_SYMBOL(rdma_set_cq_moderation);
2100
2101 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
2102 {
2103         int ret;
2104
2105         if (WARN_ON_ONCE(cq->shared))
2106                 return -EOPNOTSUPP;
2107
2108         if (atomic_read(&cq->usecnt))
2109                 return -EBUSY;
2110
2111         ret = cq->device->ops.destroy_cq(cq, udata);
2112         if (ret)
2113                 return ret;
2114
2115         rdma_restrack_del(&cq->res);
2116         kfree(cq);
2117         return ret;
2118 }
2119 EXPORT_SYMBOL(ib_destroy_cq_user);
2120
2121 int ib_resize_cq(struct ib_cq *cq, int cqe)
2122 {
2123         if (cq->shared)
2124                 return -EOPNOTSUPP;
2125
2126         return cq->device->ops.resize_cq ?
2127                 cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
2128 }
2129 EXPORT_SYMBOL(ib_resize_cq);
2130
2131 /* Memory regions */
2132
2133 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2134                              u64 virt_addr, int access_flags)
2135 {
2136         struct ib_mr *mr;
2137
2138         if (access_flags & IB_ACCESS_ON_DEMAND) {
2139                 if (!(pd->device->attrs.device_cap_flags &
2140                       IB_DEVICE_ON_DEMAND_PAGING)) {
2141                         pr_debug("ODP support not available\n");
2142                         return ERR_PTR(-EINVAL);
2143                 }
2144         }
2145
2146         mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
2147                                          access_flags, NULL);
2148
2149         if (IS_ERR(mr))
2150                 return mr;
2151
2152         mr->device = pd->device;
2153         mr->pd = pd;
2154         mr->dm = NULL;
2155         atomic_inc(&pd->usecnt);
2156
2157         rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2158         rdma_restrack_parent_name(&mr->res, &pd->res);
2159         rdma_restrack_add(&mr->res);
2160
2161         return mr;
2162 }
2163 EXPORT_SYMBOL(ib_reg_user_mr);
2164
2165 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
2166                  u32 flags, struct ib_sge *sg_list, u32 num_sge)
2167 {
2168         if (!pd->device->ops.advise_mr)
2169                 return -EOPNOTSUPP;
2170
2171         if (!num_sge)
2172                 return 0;
2173
2174         return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
2175                                          NULL);
2176 }
2177 EXPORT_SYMBOL(ib_advise_mr);
2178
2179 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
2180 {
2181         struct ib_pd *pd = mr->pd;
2182         struct ib_dm *dm = mr->dm;
2183         struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
2184         int ret;
2185
2186         trace_mr_dereg(mr);
2187         rdma_restrack_del(&mr->res);
2188         ret = mr->device->ops.dereg_mr(mr, udata);
2189         if (!ret) {
2190                 atomic_dec(&pd->usecnt);
2191                 if (dm)
2192                         atomic_dec(&dm->usecnt);
2193                 kfree(sig_attrs);
2194         }
2195
2196         return ret;
2197 }
2198 EXPORT_SYMBOL(ib_dereg_mr_user);
2199
2200 /**
2201  * ib_alloc_mr() - Allocates a memory region
2202  * @pd:            protection domain associated with the region
2203  * @mr_type:       memory region type
2204  * @max_num_sg:    maximum sg entries available for registration.
2205  *
2206  * Notes:
2207  * Memory registration page/sg lists must not exceed max_num_sg.
2208  * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
2209  * max_num_sg * used_page_size.
2210  *
2211  */
2212 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2213                           u32 max_num_sg)
2214 {
2215         struct ib_mr *mr;
2216
2217         if (!pd->device->ops.alloc_mr) {
2218                 mr = ERR_PTR(-EOPNOTSUPP);
2219                 goto out;
2220         }
2221
2222         if (mr_type == IB_MR_TYPE_INTEGRITY) {
2223                 WARN_ON_ONCE(1);
2224                 mr = ERR_PTR(-EINVAL);
2225                 goto out;
2226         }
2227
2228         mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
2229         if (IS_ERR(mr))
2230                 goto out;
2231
2232         mr->device = pd->device;
2233         mr->pd = pd;
2234         mr->dm = NULL;
2235         mr->uobject = NULL;
2236         atomic_inc(&pd->usecnt);
2237         mr->need_inval = false;
2238         mr->type = mr_type;
2239         mr->sig_attrs = NULL;
2240
2241         rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2242         rdma_restrack_parent_name(&mr->res, &pd->res);
2243         rdma_restrack_add(&mr->res);
2244 out:
2245         trace_mr_alloc(pd, mr_type, max_num_sg, mr);
2246         return mr;
2247 }
2248 EXPORT_SYMBOL(ib_alloc_mr);
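
/*
 * Illustrative sketch only: allocating a fast-registration MR sized for up to
 * 32 pages, i.e. 32 * used_page_size bytes per registration as noted above.
 * The helper name and the page budget are hypothetical.
 */
static inline struct ib_mr *example_alloc_fr_mr(struct ib_pd *pd)
{
	return ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
}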
2249
2250 /**
2251  * ib_alloc_mr_integrity() - Allocates an integrity memory region
2252  * @pd:                      protection domain associated with the region
2253  * @max_num_data_sg:         maximum data sg entries available for registration
2254  * @max_num_meta_sg:         maximum metadata sg entries available for
2255  *                           registration
2256  *
2257  * Notes:
2258  * Memory registration page/sg lists must not exceed max_num_data_sg,
2259  * also the integrity page/sg lists must not exceed max_num_meta_sg.
2260  *
2261  */
2262 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
2263                                     u32 max_num_data_sg,
2264                                     u32 max_num_meta_sg)
2265 {
2266         struct ib_mr *mr;
2267         struct ib_sig_attrs *sig_attrs;
2268
2269         if (!pd->device->ops.alloc_mr_integrity ||
2270             !pd->device->ops.map_mr_sg_pi) {
2271                 mr = ERR_PTR(-EOPNOTSUPP);
2272                 goto out;
2273         }
2274
2275         if (!max_num_meta_sg) {
2276                 mr = ERR_PTR(-EINVAL);
2277                 goto out;
2278         }
2279
2280         sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
2281         if (!sig_attrs) {
2282                 mr = ERR_PTR(-ENOMEM);
2283                 goto out;
2284         }
2285
2286         mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
2287                                                 max_num_meta_sg);
2288         if (IS_ERR(mr)) {
2289                 kfree(sig_attrs);
2290                 goto out;
2291         }
2292
2293         mr->device = pd->device;
2294         mr->pd = pd;
2295         mr->dm = NULL;
2296         mr->uobject = NULL;
2297         atomic_inc(&pd->usecnt);
2298         mr->need_inval = false;
2299         mr->type = IB_MR_TYPE_INTEGRITY;
2300         mr->sig_attrs = sig_attrs;
2301
2302         rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
2303         rdma_restrack_parent_name(&mr->res, &pd->res);
2304         rdma_restrack_add(&mr->res);
2305 out:
2306         trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
2307         return mr;
2308 }
2309 EXPORT_SYMBOL(ib_alloc_mr_integrity);
2310
2311 /* Multicast groups */
2312
2313 static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
2314 {
2315         struct ib_qp_init_attr init_attr = {};
2316         struct ib_qp_attr attr = {};
2317         int num_eth_ports = 0;
2318         unsigned int port;
2319
2320         /* If QP state >= init, it is assigned to a port and we can check this
2321          * port only.
2322          */
2323         if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
2324                 if (attr.qp_state >= IB_QPS_INIT) {
2325                         if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
2326                             IB_LINK_LAYER_INFINIBAND)
2327                                 return true;
2328                         goto lid_check;
2329                 }
2330         }
2331
2332         /* Can't get a quick answer, iterate over all ports */
2333         rdma_for_each_port(qp->device, port)
2334                 if (rdma_port_get_link_layer(qp->device, port) !=
2335                     IB_LINK_LAYER_INFINIBAND)
2336                         num_eth_ports++;
2337
2338         /* If we have at least one Ethernet port, the RoCE annex declares that
2339          * multicast LID should be ignored. We can't tell at this step if the
2340          * QP belongs to an IB or Ethernet port.
2341          */
2342         if (num_eth_ports)
2343                 return true;
2344
2345         /* If all the ports are IB, we can check according to IB spec. */
2346 lid_check:
2347         return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
2348                  lid == be16_to_cpu(IB_LID_PERMISSIVE));
2349 }
2350
2351 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2352 {
2353         int ret;
2354
2355         if (!qp->device->ops.attach_mcast)
2356                 return -EOPNOTSUPP;
2357
2358         if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2359             qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2360                 return -EINVAL;
2361
2362         ret = qp->device->ops.attach_mcast(qp, gid, lid);
2363         if (!ret)
2364                 atomic_inc(&qp->usecnt);
2365         return ret;
2366 }
2367 EXPORT_SYMBOL(ib_attach_mcast);
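
/*
 * Illustrative sketch only: joining a UD QP to a multicast group. The LID is
 * the first valid multicast LID; on RoCE ports it is ignored, as checked by
 * is_valid_mcast_lid() above. The helper name is hypothetical.
 */
static inline int example_join_mcast(struct ib_qp *qp, union ib_gid *mgid)
{
	return ib_attach_mcast(qp, mgid, be16_to_cpu(IB_MULTICAST_LID_BASE));
}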
2368
2369 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2370 {
2371         int ret;
2372
2373         if (!qp->device->ops.detach_mcast)
2374                 return -EOPNOTSUPP;
2375
2376         if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2377             qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2378                 return -EINVAL;
2379
2380         ret = qp->device->ops.detach_mcast(qp, gid, lid);
2381         if (!ret)
2382                 atomic_dec(&qp->usecnt);
2383         return ret;
2384 }
2385 EXPORT_SYMBOL(ib_detach_mcast);
2386
2387 /**
2388  * ib_alloc_xrcd_user - Allocates an XRC domain.
2389  * @device: The device on which to allocate the XRC domain.
2390  * @inode: inode to connect XRCD
2391  * @udata: Valid user data or NULL for kernel object
2392  */
2393 struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
2394                                    struct inode *inode, struct ib_udata *udata)
2395 {
2396         struct ib_xrcd *xrcd;
2397         int ret;
2398
2399         if (!device->ops.alloc_xrcd)
2400                 return ERR_PTR(-EOPNOTSUPP);
2401
2402         xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
2403         if (!xrcd)
2404                 return ERR_PTR(-ENOMEM);
2405
2406         xrcd->device = device;
2407         xrcd->inode = inode;
2408         atomic_set(&xrcd->usecnt, 0);
2409         init_rwsem(&xrcd->tgt_qps_rwsem);
2410         xa_init(&xrcd->tgt_qps);
2411
2412         ret = device->ops.alloc_xrcd(xrcd, udata);
2413         if (ret)
2414                 goto err;
2415         return xrcd;
2416 err:
2417         kfree(xrcd);
2418         return ERR_PTR(ret);
2419 }
2420 EXPORT_SYMBOL(ib_alloc_xrcd_user);
2421
2422 /**
2423  * ib_dealloc_xrcd_user - Deallocates an XRC domain.
2424  * @xrcd: The XRC domain to deallocate.
2425  * @udata: Valid user data or NULL for kernel object
2426  */
2427 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
2428 {
2429         int ret;
2430
2431         if (atomic_read(&xrcd->usecnt))
2432                 return -EBUSY;
2433
2434         WARN_ON(!xa_empty(&xrcd->tgt_qps));
2435         ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata);
2436         if (ret)
2437                 return ret;
2438         kfree(xrcd);
2439         return ret;
2440 }
2441 EXPORT_SYMBOL(ib_dealloc_xrcd_user);
2442
2443 /**
2444  * ib_create_wq - Creates a WQ associated with the specified protection
2445  * domain.
2446  * @pd: The protection domain associated with the WQ.
2447  * @wq_attr: A list of initial attributes required to create the
2448  * WQ. If WQ creation succeeds, then the attributes are updated to
2449  * the actual capabilities of the created WQ.
2450  *
2451  * wq_attr->max_wr and wq_attr->max_sge determine
2452  * the requested size of the WQ, and are set to the actual values allocated
2453  * on return.
2454  * If ib_create_wq() succeeds, then max_wr and max_sge will always be
2455  * at least as large as the requested values.
2456  */
2457 struct ib_wq *ib_create_wq(struct ib_pd *pd,
2458                            struct ib_wq_init_attr *wq_attr)
2459 {
2460         struct ib_wq *wq;
2461
2462         if (!pd->device->ops.create_wq)
2463                 return ERR_PTR(-EOPNOTSUPP);
2464
2465         wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
2466         if (!IS_ERR(wq)) {
2467                 wq->event_handler = wq_attr->event_handler;
2468                 wq->wq_context = wq_attr->wq_context;
2469                 wq->wq_type = wq_attr->wq_type;
2470                 wq->cq = wq_attr->cq;
2471                 wq->device = pd->device;
2472                 wq->pd = pd;
2473                 wq->uobject = NULL;
2474                 atomic_inc(&pd->usecnt);
2475                 atomic_inc(&wq_attr->cq->usecnt);
2476                 atomic_set(&wq->usecnt, 0);
2477         }
2478         return wq;
2479 }
2480 EXPORT_SYMBOL(ib_create_wq);
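
/*
 * Illustrative sketch only: creating a receive WQ, e.g. as one member of an
 * RSS indirection table. The sizes are placeholders and the helper name is
 * hypothetical; the caller still owns error handling via IS_ERR().
 */
static inline struct ib_wq *example_create_rq_wq(struct ib_pd *pd,
						 struct ib_cq *cq)
{
	struct ib_wq_init_attr attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr	 = 128,
		.max_sge = 1,
		.cq	 = cq,
	};

	return ib_create_wq(pd, &attr);
}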
2481
2482 /**
2483  * ib_destroy_wq_user - Destroys the specified user WQ.
2484  * @wq: The WQ to destroy.
2485  * @udata: Valid user data
2486  */
2487 int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
2488 {
2489         struct ib_cq *cq = wq->cq;
2490         struct ib_pd *pd = wq->pd;
2491         int ret;
2492
2493         if (atomic_read(&wq->usecnt))
2494                 return -EBUSY;
2495
2496         ret = wq->device->ops.destroy_wq(wq, udata);
2497         if (ret)
2498                 return ret;
2499
2500         atomic_dec(&pd->usecnt);
2501         atomic_dec(&cq->usecnt);
2502         return ret;
2503 }
2504 EXPORT_SYMBOL(ib_destroy_wq_user);
2505
2506 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
2507                        struct ib_mr_status *mr_status)
2508 {
2509         if (!mr->device->ops.check_mr_status)
2510                 return -EOPNOTSUPP;
2511
2512         return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
2513 }
2514 EXPORT_SYMBOL(ib_check_mr_status);
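
/*
 * Illustrative sketch only: after an integrity-protected transfer completes,
 * a ULP can query the MR for signature errors. The helper name and the -EIO
 * mapping are hypothetical; only ib_check_mr_status() is real.
 */
static inline int example_check_sig_status(struct ib_mr *mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;

	return (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) ? -EIO : 0;
}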
2515
2516 int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
2517                          int state)
2518 {
2519         if (!device->ops.set_vf_link_state)
2520                 return -EOPNOTSUPP;
2521
2522         return device->ops.set_vf_link_state(device, vf, port, state);
2523 }
2524 EXPORT_SYMBOL(ib_set_vf_link_state);
2525
2526 int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
2527                      struct ifla_vf_info *info)
2528 {
2529         if (!device->ops.get_vf_config)
2530                 return -EOPNOTSUPP;
2531
2532         return device->ops.get_vf_config(device, vf, port, info);
2533 }
2534 EXPORT_SYMBOL(ib_get_vf_config);
2535
2536 int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
2537                     struct ifla_vf_stats *stats)
2538 {
2539         if (!device->ops.get_vf_stats)
2540                 return -EOPNOTSUPP;
2541
2542         return device->ops.get_vf_stats(device, vf, port, stats);
2543 }
2544 EXPORT_SYMBOL(ib_get_vf_stats);
2545
2546 int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
2547                    int type)
2548 {
2549         if (!device->ops.set_vf_guid)
2550                 return -EOPNOTSUPP;
2551
2552         return device->ops.set_vf_guid(device, vf, port, guid, type);
2553 }
2554 EXPORT_SYMBOL(ib_set_vf_guid);
2555
2556 int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
2557                    struct ifla_vf_guid *node_guid,
2558                    struct ifla_vf_guid *port_guid)
2559 {
2560         if (!device->ops.get_vf_guid)
2561                 return -EOPNOTSUPP;
2562
2563         return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
2564 }
2565 EXPORT_SYMBOL(ib_get_vf_guid);
2566 /**
2567  * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
2568  *     information) and set an appropriate memory region for registration.
2569  * @mr:             memory region
2570  * @data_sg:        dma mapped scatterlist for data
2571  * @data_sg_nents:  number of entries in data_sg
2572  * @data_sg_offset: offset in bytes into data_sg
2573  * @meta_sg:        dma mapped scatterlist for metadata
2574  * @meta_sg_nents:  number of entries in meta_sg
2575  * @meta_sg_offset: offset in bytes into meta_sg
2576  * @page_size:      page vector desired page size
2577  *
2578  * Constraints:
2579  * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
2580  *
2581  * Return: 0 on success.
2582  *
2583  * After this completes successfully, the memory region
2584  * is ready for registration.
2585  */
2586 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
2587                     int data_sg_nents, unsigned int *data_sg_offset,
2588                     struct scatterlist *meta_sg, int meta_sg_nents,
2589                     unsigned int *meta_sg_offset, unsigned int page_size)
2590 {
2591         if (unlikely(!mr->device->ops.map_mr_sg_pi ||
2592                      WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
2593                 return -EOPNOTSUPP;
2594
2595         mr->page_size = page_size;
2596
2597         return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
2598                                             data_sg_offset, meta_sg,
2599                                             meta_sg_nents, meta_sg_offset);
2600 }
2601 EXPORT_SYMBOL(ib_map_mr_sg_pi);
2602
2603 /**
2604  * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
2605  *     and set it on the memory region.
2606  * @mr:            memory region
2607  * @sg:            dma mapped scatterlist
2608  * @sg_nents:      number of entries in sg
2609  * @sg_offset:     offset in bytes into sg
2610  * @page_size:     page vector desired page size
2611  *
2612  * Constraints:
2613  *
2614  * - The first sg element is allowed to have an offset.
2615  * - Each sg element must either be aligned to page_size or virtually
2616  *   contiguous to the previous element. In case an sg element has a
2617  *   non-contiguous offset, the mapping prefix will not include it.
2618  * - The last sg element is allowed to have length less than page_size.
2619  * - If the sg_nents total byte length exceeds the mr max_num_sg * page_size
2620  *   then only max_num_sg entries will be mapped.
2621  * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2622  *   constraints holds and the page_size argument is ignored.
2623  *
2624  * Returns the number of sg elements that were mapped to the memory region.
2625  *
2626  * After this completes successfully, the memory region
2627  * is ready for registration.
2628  */
2629 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2630                  unsigned int *sg_offset, unsigned int page_size)
2631 {
2632         if (unlikely(!mr->device->ops.map_mr_sg))
2633                 return -EOPNOTSUPP;
2634
2635         mr->page_size = page_size;
2636
2637         return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
2638 }
2639 EXPORT_SYMBOL(ib_map_mr_sg);
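
/*
 * Illustrative sketch only of the usual fast-registration flow built on
 * ib_map_mr_sg(): DMA-map a scatterlist, then hand the mapped entries to the
 * MR with a PAGE_SIZE page vector. The helper name is hypothetical and error
 * handling is reduced to the minimum.
 */
static inline int example_map_fr_mr(struct ib_device *dev, struct ib_mr *mr,
				    struct scatterlist *sg, int sg_nents)
{
	int nents, mapped;

	nents = ib_dma_map_sg(dev, sg, sg_nents, DMA_TO_DEVICE);
	if (!nents)
		return -EIO;

	mapped = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
	if (mapped < nents) {
		ib_dma_unmap_sg(dev, sg, sg_nents, DMA_TO_DEVICE);
		return mapped < 0 ? mapped : -EINVAL;
	}
	return 0;
}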
2640
2641 /**
2642  * ib_sg_to_pages() - Convert the largest prefix of a sg list
2643  *     to a page vector
2644  * @mr:            memory region
2645  * @sgl:           dma mapped scatterlist
2646  * @sg_nents:      number of entries in sg
2647  * @sg_offset_p:   ==== =======================================================
2648  *                 IN   start offset in bytes into sg
2649  *                 OUT  offset in bytes for element n of the sg of the first
2650  *                      byte that has not been processed where n is the return
2651  *                      value of this function.
2652  *                 ==== =======================================================
2653  * @set_page:      driver page assignment function pointer
2654  *
2655  * Core service helper for drivers to convert the largest
2656  * prefix of the given sg list to a page vector. The sg list
2657  * prefix converted is the prefix that meets the requirements
2658  * of ib_map_mr_sg.
2659  *
2660  * Returns the number of sg elements that were assigned to
2661  * a page vector.
2662  */
2663 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
2664                 unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
2665 {
2666         struct scatterlist *sg;
2667         u64 last_end_dma_addr = 0;
2668         unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2669         unsigned int last_page_off = 0;
2670         u64 page_mask = ~((u64)mr->page_size - 1);
2671         int i, ret;
2672
2673         if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
2674                 return -EINVAL;
2675
2676         mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
2677         mr->length = 0;
2678
2679         for_each_sg(sgl, sg, sg_nents, i) {
2680                 u64 dma_addr = sg_dma_address(sg) + sg_offset;
2681                 u64 prev_addr = dma_addr;
2682                 unsigned int dma_len = sg_dma_len(sg) - sg_offset;
2683                 u64 end_dma_addr = dma_addr + dma_len;
2684                 u64 page_addr = dma_addr & page_mask;
2685
2686                 /*
2687                  * For the second and later elements, check whether either the
2688                  * end of element i-1 or the start of element i is not aligned
2689                  * on a page boundary.
2690                  */
2691                 if (i && (last_page_off != 0 || page_addr != dma_addr)) {
2692                         /* Stop mapping if there is a gap. */
2693                         if (last_end_dma_addr != dma_addr)
2694                                 break;
2695
2696                         /*
2697                          * Coalesce this element with the last. If it is small
2698                          * enough just update mr->length. Otherwise start
2699                          * mapping from the next page.
2700                          */
2701                         goto next_page;
2702                 }
2703
2704                 do {
2705                         ret = set_page(mr, page_addr);
2706                         if (unlikely(ret < 0)) {
2707                                 sg_offset = prev_addr - sg_dma_address(sg);
2708                                 mr->length += prev_addr - dma_addr;
2709                                 if (sg_offset_p)
2710                                         *sg_offset_p = sg_offset;
2711                                 return i || sg_offset ? i : ret;
2712                         }
2713                         prev_addr = page_addr;
2714 next_page:
2715                         page_addr += mr->page_size;
2716                 } while (page_addr < end_dma_addr);
2717
2718                 mr->length += dma_len;
2719                 last_end_dma_addr = end_dma_addr;
2720                 last_page_off = end_dma_addr & ~page_mask;
2721
2722                 sg_offset = 0;
2723         }
2724
2725         if (sg_offset_p)
2726                 *sg_offset_p = 0;
2727         return i;
2728 }
2729 EXPORT_SYMBOL(ib_sg_to_pages);
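
/*
 * Example (a minimal driver-side sketch): a hypothetical driver implements
 * its ->map_mr_sg() op by letting ib_sg_to_pages() walk the SG list and call
 * a set_page callback for every block of mr->page_size bytes.  The foo_*
 * names and the pages[] array are illustrative assumptions, not part of any
 * in-tree driver.
 */
struct foo_mr {
        struct ib_mr ibmr;
        u64 *pages;
        u32 npages;
        u32 max_pages;
};

static int foo_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct foo_mr *mr = container_of(ibmr, struct foo_mr, ibmr);

        if (unlikely(mr->npages == mr->max_pages))
                return -ENOMEM;

        mr->pages[mr->npages++] = addr;
        return 0;
}

static int foo_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                         int sg_nents, unsigned int *sg_offset)
{
        struct foo_mr *mr = container_of(ibmr, struct foo_mr, ibmr);

        mr->npages = 0;
        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, foo_set_page);
}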
2730
2731 struct ib_drain_cqe {
2732         struct ib_cqe cqe;
2733         struct completion done;
2734 };
2735
2736 static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
2737 {
2738         struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
2739                                                 cqe);
2740
2741         complete(&cqe->done);
2742 }
2743
2744 /*
2745  * Post a WR and block until its completion is reaped for the SQ.
2746  */
2747 static void __ib_drain_sq(struct ib_qp *qp)
2748 {
2749         struct ib_cq *cq = qp->send_cq;
2750         struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2751         struct ib_drain_cqe sdrain;
2752         struct ib_rdma_wr swr = {
2753                 .wr = {
2754                         .next = NULL,
2755                         { .wr_cqe       = &sdrain.cqe, },
2756                         .opcode = IB_WR_RDMA_WRITE,
2757                 },
2758         };
2759         int ret;
2760
2761         ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2762         if (ret) {
2763                 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2764                 return;
2765         }
2766
2767         sdrain.cqe.done = ib_drain_qp_done;
2768         init_completion(&sdrain.done);
2769
2770         ret = ib_post_send(qp, &swr.wr, NULL);
2771         if (ret) {
2772                 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
2773                 return;
2774         }
2775
2776         if (cq->poll_ctx == IB_POLL_DIRECT)
2777                 while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
2778                         ib_process_cq_direct(cq, -1);
2779         else
2780                 wait_for_completion(&sdrain.done);
2781 }
2782
2783 /*
2784  * Post a WR and block until its completion is reaped for the RQ.
2785  */
2786 static void __ib_drain_rq(struct ib_qp *qp)
2787 {
2788         struct ib_cq *cq = qp->recv_cq;
2789         struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
2790         struct ib_drain_cqe rdrain;
2791         struct ib_recv_wr rwr = {};
2792         int ret;
2793
2794         ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
2795         if (ret) {
2796                 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2797                 return;
2798         }
2799
2800         rwr.wr_cqe = &rdrain.cqe;
2801         rdrain.cqe.done = ib_drain_qp_done;
2802         init_completion(&rdrain.done);
2803
2804         ret = ib_post_recv(qp, &rwr, NULL);
2805         if (ret) {
2806                 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
2807                 return;
2808         }
2809
2810         if (cq->poll_ctx == IB_POLL_DIRECT)
2811                 while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
2812                         ib_process_cq_direct(cq, -1);
2813         else
2814                 wait_for_completion(&rdrain.done);
2815 }
2816
2817 /**
2818  * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2819  *                 application.
2820  * @qp:            queue pair to drain
2821  *
2822  * If the device has a provider-specific drain function, then
2823  * call that.  Otherwise call the generic drain function
2824  * __ib_drain_sq().
2825  *
2826  * The caller must:
2827  *
2828  * ensure there is room in the CQ and SQ for the drain work request and
2829  * completion.
2830  *
2831  * allocate the CQ using ib_alloc_cq().
2832  *
2833  * ensure that there are no other contexts that are posting WRs concurrently.
2834  * Otherwise the drain is not guaranteed.
2835  */
2836 void ib_drain_sq(struct ib_qp *qp)
2837 {
2838         if (qp->device->ops.drain_sq)
2839                 qp->device->ops.drain_sq(qp);
2840         else
2841                 __ib_drain_sq(qp);
2842         trace_cq_drain_complete(qp->send_cq);
2843 }
2844 EXPORT_SYMBOL(ib_drain_sq);
2845
2846 /**
2847  * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2848  *                 application.
2849  * @qp:            queue pair to drain
2850  *
2851  * If the device has a provider-specific drain function, then
2852  * call that.  Otherwise call the generic drain function
2853  * __ib_drain_rq().
2854  *
2855  * The caller must:
2856  *
2857  * ensure there is room in the CQ and RQ for the drain work request and
2858  * completion.
2859  *
2860  * allocate the CQ using ib_alloc_cq().
2861  *
2862  * ensure that there are no other contexts that are posting WRs concurrently.
2863  * Otherwise the drain is not guaranteed.
2864  */
2865 void ib_drain_rq(struct ib_qp *qp)
2866 {
2867         if (qp->device->ops.drain_rq)
2868                 qp->device->ops.drain_rq(qp);
2869         else
2870                 __ib_drain_rq(qp);
2871         trace_cq_drain_complete(qp->recv_cq);
2872 }
2873 EXPORT_SYMBOL(ib_drain_rq);
2874
2875 /**
2876  * ib_drain_qp() - Block until all CQEs have been consumed by the
2877  *                 application on both the RQ and SQ.
2878  * @qp:            queue pair to drain
2879  *
2880  * The caller must:
2881  *
2882  * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
2883  * and completions.
2884  *
2885  * allocate the CQs using ib_alloc_cq().
2886  *
2887  * ensure that there are no other contexts that are posting WRs concurrently.
2888  * Otherwise the drain is not guaranteed.
2889  */
2890 void ib_drain_qp(struct ib_qp *qp)
2891 {
2892         ib_drain_sq(qp);
2893         if (!qp->srq)
2894                 ib_drain_rq(qp);
2895 }
2896 EXPORT_SYMBOL(ib_drain_qp);
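
/*
 * Example teardown order (a minimal sketch): a consumer normally drains the
 * QP so that every outstanding completion has been reaped before the QP and
 * its CQ are destroyed.  ulp_destroy_queue() and the single shared CQ are
 * illustrative assumptions only.
 */
static void ulp_destroy_queue(struct ib_qp *qp, struct ib_cq *cq)
{
        /* No new WRs may be posted once the drain has started. */
        ib_drain_qp(qp);
        ib_destroy_qp(qp);
        ib_free_cq(cq);
}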
2897
2898 struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
2899                                      enum rdma_netdev_t type, const char *name,
2900                                      unsigned char name_assign_type,
2901                                      void (*setup)(struct net_device *))
2902 {
2903         struct rdma_netdev_alloc_params params;
2904         struct net_device *netdev;
2905         int rc;
2906
2907         if (!device->ops.rdma_netdev_get_params)
2908                 return ERR_PTR(-EOPNOTSUPP);
2909
2910         rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2911                                                 &params);
2912         if (rc)
2913                 return ERR_PTR(rc);
2914
2915         netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
2916                                   setup, params.txqs, params.rxqs);
2917         if (!netdev)
2918                 return ERR_PTR(-ENOMEM);
2919
2920         return netdev;
2921 }
2922 EXPORT_SYMBOL(rdma_alloc_netdev);
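
/*
 * Example (a minimal sketch): an offload-capable ULP asks the device to size
 * and allocate its net_device.  ulp_create_netdev() and ulp_setup() are
 * illustrative assumptions; RDMA_NETDEV_IPOIB is one of the defined
 * rdma_netdev_t types.
 */
static struct net_device *
ulp_create_netdev(struct ib_device *device, u32 port_num,
                  void (*ulp_setup)(struct net_device *))
{
        struct net_device *netdev;

        netdev = rdma_alloc_netdev(device, port_num, RDMA_NETDEV_IPOIB,
                                   "ib%d", NET_NAME_UNKNOWN, ulp_setup);
        if (IS_ERR(netdev))
                return netdev;

        /* The caller is responsible for register_netdev()/free_netdev(). */
        return netdev;
}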
2923
2924 int rdma_init_netdev(struct ib_device *device, u32 port_num,
2925                      enum rdma_netdev_t type, const char *name,
2926                      unsigned char name_assign_type,
2927                      void (*setup)(struct net_device *),
2928                      struct net_device *netdev)
2929 {
2930         struct rdma_netdev_alloc_params params;
2931         int rc;
2932
2933         if (!device->ops.rdma_netdev_get_params)
2934                 return -EOPNOTSUPP;
2935
2936         rc = device->ops.rdma_netdev_get_params(device, port_num, type,
2937                                                 &params);
2938         if (rc)
2939                 return rc;
2940
2941         return params.initialize_rdma_netdev(device, port_num,
2942                                              netdev, params.param);
2943 }
2944 EXPORT_SYMBOL(rdma_init_netdev);
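
/*
 * Example (a minimal sketch): when the caller allocates the net_device
 * itself, rdma_init_netdev() lets the device initialize the offload-specific
 * private area of that netdev.  ulp_init_netdev() and ulp_setup() are
 * illustrative assumptions only.
 */
static int ulp_init_netdev(struct ib_device *device, u32 port_num,
                           struct net_device *netdev,
                           void (*ulp_setup)(struct net_device *))
{
        return rdma_init_netdev(device, port_num, RDMA_NETDEV_IPOIB,
                                netdev->name, NET_NAME_UNKNOWN, ulp_setup,
                                netdev);
}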
2945
2946 void __rdma_block_iter_start(struct ib_block_iter *biter,
2947                              struct scatterlist *sglist, unsigned int nents,
2948                              unsigned long pgsz)
2949 {
2950         memset(biter, 0, sizeof(struct ib_block_iter));
2951         biter->__sg = sglist;
2952         biter->__sg_nents = nents;
2953
2954         /* Driver provides best block size to use */
2955         biter->__pg_bit = __fls(pgsz);
2956 }
2957 EXPORT_SYMBOL(__rdma_block_iter_start);
2958
2959 bool __rdma_block_iter_next(struct ib_block_iter *biter)
2960 {
2961         unsigned int block_offset;
2962
2963         if (!biter->__sg_nents || !biter->__sg)
2964                 return false;
2965
2966         biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
2967         block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
2968         biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
2969
2970         if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
2971                 biter->__sg_advance = 0;
2972                 biter->__sg = sg_next(biter->__sg);
2973                 biter->__sg_nents--;
2974         }
2975
2976         return true;
2977 }
2978 EXPORT_SYMBOL(__rdma_block_iter_next);
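
/*
 * Example (a minimal sketch): a driver walks a dma-mapped SG list in
 * device-page-size blocks with the rdma_for_each_block() helper, which is
 * built on the two functions above.  foo_fill_pages() and the pas[] array
 * are illustrative assumptions only.
 */
static unsigned int foo_fill_pages(struct scatterlist *sgl, unsigned int nents,
                                   unsigned long pgsz, u64 *pas)
{
        struct ib_block_iter biter;
        unsigned int n = 0;

        rdma_for_each_block(sgl, &biter, nents, pgsz)
                pas[n++] = rdma_block_iter_dma_address(&biter);

        return n;
}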