drivers/infiniband/hw/hns/hns_roce_qp.c
/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM                         (2 * HNS_ROCE_MAX_PORTS)

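/*
 * Dispatch an asynchronous hardware event to the QP it refers to. The QP is
 * looked up under the xarray lock and pinned with a reference so it cannot
 * be freed while its event callback runs.
 */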
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_qp *qp;

        xa_lock(&hr_dev->qp_table_xa);
        qp = __hns_roce_qp_lookup(hr_dev, qpn);
        if (qp)
                atomic_inc(&qp->refcount);
        xa_unlock(&hr_dev->qp_table_xa);

        if (!qp) {
                dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        qp->event(qp, (enum hns_roce_event)event_type);

        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_event);

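/*
 * Default per-QP event callback: translate the hardware event code into the
 * corresponding IB event and pass it to the consumer's event handler, if any.
 */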
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
                                 enum hns_roce_event type)
{
        struct ib_event event;
        struct ib_qp *ibqp = &hr_qp->ibqp;

        if (ibqp->event_handler) {
                event.device = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case HNS_ROCE_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
                                type, hr_qp->qpn);
                        return;
                }
                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

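/* Reserve a contiguous, aligned range of QP numbers from the QPN bitmap. */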
static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
                                     int align, unsigned long *base)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
                                           base) ?
                       -ENOMEM :
                       0;
}

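/* Translate an IB QP state into the hardware QP state encoding. */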
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return HNS_ROCE_QP_STATE_RST;
        case IB_QPS_INIT:
                return HNS_ROCE_QP_STATE_INIT;
        case IB_QPS_RTR:
                return HNS_ROCE_QP_STATE_RTR;
        case IB_QPS_RTS:
                return HNS_ROCE_QP_STATE_RTS;
        case IB_QPS_SQD:
                return HNS_ROCE_QP_STATE_SQD;
        case IB_QPS_ERR:
                return HNS_ROCE_QP_STATE_ERR;
        default:
                return HNS_ROCE_QP_NUM_STATE;
        }
}
EXPORT_SYMBOL_GPL(to_hns_roce_state);

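/*
 * Register the QP in the QPN-indexed xarray so that asynchronous events can
 * find it, and initialize its reference count and free completion.
 */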
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                                 struct hns_roce_qp *hr_qp)
{
        struct xarray *xa = &hr_dev->qp_table_xa;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;
        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        ret = xa_err(xa_store_irq(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1),
                                hr_qp, GFP_KERNEL));
        if (ret)
                dev_err(hr_dev->dev, "QPC xa_store failed\n");

        return ret;
}

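/*
 * Allocate the per-QP hardware context entries (QPC, IRRL, and optionally
 * TRRL and SCC context) and then register the QP for QPN lookup.
 */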
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                             struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = hr_dev->dev;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;

        /* Alloc memory for QPC */
        ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "QPC table get failed\n");
                goto err_out;
        }

        /* Alloc memory for IRRL */
        ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "IRRL table get failed\n");
                goto err_put_qp;
        }

        if (hr_dev->caps.trrl_entry_sz) {
                /* Alloc memory for TRRL */
                ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
                                         hr_qp->qpn);
                if (ret) {
                        dev_err(dev, "TRRL table get failed\n");
                        goto err_put_irrl;
                }
        }

        if (hr_dev->caps.sccc_entry_sz) {
                /* Alloc memory for SCC CTX */
                ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
                                         hr_qp->qpn);
                if (ret) {
                        dev_err(dev, "SCC CTX table get failed\n");
                        goto err_put_trrl;
                }
        }

        ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
        if (ret)
                goto err_put_sccc;

        return 0;

err_put_sccc:
        if (hr_dev->caps.sccc_entry_sz)
                hns_roce_table_put(hr_dev, &qp_table->sccc_table,
                                   hr_qp->qpn);

err_put_trrl:
        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
        hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
        return ret;
}

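/* Remove the QP from the QPN lookup xarray. */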
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct xarray *xa = &hr_dev->qp_table_xa;
        unsigned long flags;

        xa_lock_irqsave(xa, flags);
        __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
        xa_unlock_irqrestore(xa, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);

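/*
 * Drop the creation reference, wait until all other references to the QP are
 * gone, and then release its hardware context entries (non-GSI QPs only).
 */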
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (atomic_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
        wait_for_completion(&hr_qp->free);

        if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
                if (hr_dev->caps.trrl_entry_sz)
                        hns_roce_table_put(hr_dev, &qp_table->trrl_table,
                                           hr_qp->qpn);
                hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
                hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        }
}
EXPORT_SYMBOL_GPL(hns_roce_qp_free);

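/* Return a range of QPNs to the bitmap; the reserved special QPNs stay taken. */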
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
                               int cnt)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (base_qpn < SQP_NUM)
                return;

        hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

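/*
 * Validate the requested RQ capabilities against the device limits and derive
 * the RQ WQE count, WQE shift and max SGE count. A QP without an RQ (e.g. one
 * that uses an SRQ) gets all RQ parameters set to zero.
 */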
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
                                struct ib_qp_cap *cap, bool is_user, int has_rq,
                                struct hns_roce_qp *hr_qp)
{
        struct device *dev = hr_dev->dev;
        u32 max_cnt;

        /* Check the requested capabilities against the device limits */
        if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
            cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
                dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
                        cap->max_recv_wr, cap->max_recv_sge);
                return -EINVAL;
        }

        /* If the QP has no RQ (e.g. an SRQ is used), zero the RQ parameters */
        if (!has_rq) {
                hr_qp->rq.wqe_cnt = 0;
                hr_qp->rq.max_gs = 0;
                cap->max_recv_wr = 0;
                cap->max_recv_sge = 0;
        } else {
                if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
                        dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
                        return -EINVAL;
                }

                if (hr_dev->caps.min_wqes)
                        max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
                else
                        max_cnt = cap->max_recv_wr;

                hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

                if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
                        dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
                        return -EINVAL;
                }

                max_cnt = max(1U, cap->max_recv_sge);
                hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
                if (hr_dev->caps.max_rq_sg <= 2)
                        hr_qp->rq.wqe_shift =
                                        ilog2(hr_dev->caps.max_rq_desc_sz);
                else
                        hr_qp->rq.wqe_shift =
                                        ilog2(hr_dev->caps.max_rq_desc_sz
                                              * hr_qp->rq.max_gs);
        }

        cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
        cap->max_recv_sge = hr_qp->rq.max_gs;

        return 0;
}

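/*
 * Size the SQ of a userspace QP from the parameters passed in the create
 * command, then compute the overall WQE buffer layout (SQ, extended SGE
 * space and RQ offsets) within the user-provided buffer.
 */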
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
                                     struct ib_qp_cap *cap,
                                     struct hns_roce_qp *hr_qp,
                                     struct hns_roce_ib_create_qp *ucmd)
{
        u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
        u8 max_sq_stride = ilog2(roundup_sq_stride);
        u32 ex_sge_num;
        u32 page_size;
        u32 max_cnt;

        /* Sanity check SQ size before proceeding */
        if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
             ucmd->log_sq_stride > max_sq_stride ||
             ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
                dev_err(hr_dev->dev, "check SQ size error!\n");
                return -EINVAL;
        }

        if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
                dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
                        cap->max_send_sge);
                return -EINVAL;
        }

        hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

        max_cnt = max(1U, cap->max_send_sge);
        if (hr_dev->caps.max_sq_sg <= 2)
                hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
        else
                hr_qp->sq.max_gs = max_cnt;

        if (hr_qp->sq.max_gs > 2)
                hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                                                        (hr_qp->sq.max_gs - 2));

        if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
                if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
                        dev_err(hr_dev->dev,
                                "The extended sge cnt error! sge_cnt=%d\n",
                                hr_qp->sge.sge_cnt);
                        return -EINVAL;
                }
        }

        hr_qp->sge.sge_shift = 4;
        ex_sge_num = hr_qp->sge.sge_cnt;

        /* Get buf size; SQ and RQ are aligned to the page size */
        if (hr_dev->caps.max_sq_sg <= 2) {
                hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
                                             hr_qp->rq.wqe_shift), PAGE_SIZE) +
                                   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), PAGE_SIZE);

                hr_qp->sq.offset = 0;
                hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), PAGE_SIZE);
        } else {
                page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
                hr_qp->sge.sge_cnt =
                       max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num);
                hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
                                             hr_qp->rq.wqe_shift), page_size) +
                                   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
                                             hr_qp->sge.sge_shift), page_size) +
                                   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), page_size);

                hr_qp->sq.offset = 0;
                if (ex_sge_num) {
                        hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
                                                        (hr_qp->sq.wqe_cnt <<
                                                        hr_qp->sq.wqe_shift),
                                                        page_size);
                        hr_qp->rq.offset = hr_qp->sge.offset +
                                        HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
                                                hr_qp->sge.sge_shift),
                                                page_size);
                } else {
                        hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
                                                        (hr_qp->sq.wqe_cnt <<
                                                        hr_qp->sq.wqe_shift),
                                                        page_size);
                }
        }

        return 0;
}

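/*
 * Size the SQ of a kernel QP from the requested capabilities and compute the
 * WQE buffer layout (SQ, extended SGE space, RQ) for the buffer the driver
 * will allocate itself.
 */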
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
                                       struct ib_qp_cap *cap,
                                       struct hns_roce_qp *hr_qp)
{
        struct device *dev = hr_dev->dev;
        u32 page_size;
        u32 max_cnt;
        int size;

        if (cap->max_send_wr  > hr_dev->caps.max_wqes  ||
            cap->max_send_sge > hr_dev->caps.max_sq_sg ||
            cap->max_inline_data > hr_dev->caps.max_sq_inline) {
                dev_err(dev, "SQ WR or sge or inline data error!\n");
                return -EINVAL;
        }

        hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
        hr_qp->sq_max_wqes_per_wr = 1;
        hr_qp->sq_spare_wqes = 0;

        if (hr_dev->caps.min_wqes)
                max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
        else
                max_cnt = cap->max_send_wr;

        hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
        if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
                dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
                return -EINVAL;
        }

        /* Get the number of data segments */
        max_cnt = max(1U, cap->max_send_sge);
        if (hr_dev->caps.max_sq_sg <= 2)
                hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
        else
                hr_qp->sq.max_gs = max_cnt;

        if (hr_qp->sq.max_gs > 2) {
                hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                                     (hr_qp->sq.max_gs - 2));
                hr_qp->sge.sge_shift = 4;
        }

        /* UD SQ WQEs place their SGEs in the extended SGE space */
        if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
                hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                                     hr_qp->sq.max_gs);
                hr_qp->sge.sge_shift = 4;
        }

        if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
                if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
                        dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
                                hr_qp->sge.sge_cnt);
                        return -EINVAL;
                }
        }

        /* Get buf size; SQ and RQ are aligned to the MTT page size */
        page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
        hr_qp->sq.offset = 0;
        size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
                                 page_size);

        if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
                hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
                                        (u32)hr_qp->sge.sge_cnt);
                hr_qp->sge.offset = size;
                size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
                                          hr_qp->sge.sge_shift, page_size);
        }

        hr_qp->rq.offset = size;
        size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
                                  page_size);
        hr_qp->buff_size = size;

        /* Report the WR and SGE numbers available for sending */
        cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
        cap->max_send_sge = hr_qp->sq.max_gs;

        /* We don't support inline sends for kernel QPs (yet) */
        cap->max_inline_data = 0;

        return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
                return 0;

        return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI ||
            attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
            !attr->cap.max_recv_wr)
                return 0;

        return 1;
}

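/*
 * Common QP creation path for both user and kernel QPs: size the work queues,
 * allocate or map the WQE buffer and its MTT, set up record doorbells where
 * supported, reserve a QPN (unless a special QPN was passed in) and install
 * the software QP state.
 */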
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                                     struct ib_pd *ib_pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata, unsigned long sqpn,
                                     struct hns_roce_qp *hr_qp)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_ib_create_qp ucmd;
        struct hns_roce_ib_create_qp_resp resp = {};
        struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct hns_roce_ucontext, ibucontext);
        unsigned long qpn = 0;
        int ret = 0;
        u32 page_shift;
        u32 npages;
        int i;

        mutex_init(&hr_qp->mutex);
        spin_lock_init(&hr_qp->sq.lock);
        spin_lock_init(&hr_qp->rq.lock);

        hr_qp->state = IB_QPS_RESET;

        hr_qp->ibqp.qp_type = init_attr->qp_type;

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
        else
                hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

        ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata,
                                   hns_roce_qp_has_rq(init_attr), hr_qp);
        if (ret) {
                dev_err(dev, "hns_roce_set_rq_size failed\n");
                goto err_out;
        }

        if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
            hns_roce_qp_has_rq(init_attr)) {
                /* allocate recv inline buf */
                hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
                                               sizeof(struct hns_roce_rinl_wqe),
                                               GFP_KERNEL);
                if (!hr_qp->rq_inl_buf.wqe_list) {
                        ret = -ENOMEM;
                        goto err_out;
                }

                hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

                /* First, allocate one contiguous block of SGE space */
                hr_qp->rq_inl_buf.wqe_list[0].sg_list =
                                        kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
                                               init_attr->cap.max_recv_sge *
                                               sizeof(struct hns_roce_rinl_sge),
                                               GFP_KERNEL);
                if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
                        ret = -ENOMEM;
                        goto err_wqe_list;
                }

                for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
                        /* Then point each WQE's sg_list into that block */
                        hr_qp->rq_inl_buf.wqe_list[i].sg_list =
                                &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
                                init_attr->cap.max_recv_sge];
        }

        if (udata) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        dev_err(dev, "ib_copy_from_udata error for create qp\n");
                        ret = -EFAULT;
                        goto err_rq_sge_list;
                }

                ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
                                                &ucmd);
                if (ret) {
                        dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
                        goto err_rq_sge_list;
                }

                hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr,
                                          hr_qp->buff_size, 0, 0);
                if (IS_ERR(hr_qp->umem)) {
                        dev_err(dev, "ib_umem_get error for create qp\n");
                        ret = PTR_ERR(hr_qp->umem);
                        goto err_rq_sge_list;
                }

                hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
                page_shift = PAGE_SHIFT;
                if (hr_dev->caps.mtt_buf_pg_sz) {
                        npages = (ib_umem_page_count(hr_qp->umem) +
                                  (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
                                 (1 << hr_dev->caps.mtt_buf_pg_sz);
                        page_shift += hr_dev->caps.mtt_buf_pg_sz;
                        ret = hns_roce_mtt_init(hr_dev, npages,
                                    page_shift,
                                    &hr_qp->mtt);
                } else {
                        ret = hns_roce_mtt_init(hr_dev,
                                                ib_umem_page_count(hr_qp->umem),
                                                page_shift, &hr_qp->mtt);
                }
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for create qp\n");
                        goto err_buf;
                }

                ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
                                                 hr_qp->umem);
                if (ret) {
                        dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
                        goto err_mtt;
                }

                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
                    (udata->inlen >= sizeof(ucmd)) &&
                    (udata->outlen >= sizeof(resp)) &&
                    hns_roce_qp_has_sq(init_attr)) {
                        ret = hns_roce_db_map_user(uctx, udata, ucmd.sdb_addr,
                                                   &hr_qp->sdb);
                        if (ret) {
                                dev_err(dev, "sq record doorbell map failed!\n");
                                goto err_mtt;
                        }

                        /* indicate kernel supports sq record db */
                        resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
                        hr_qp->sdb_en = 1;
                }

                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    (udata->outlen >= sizeof(resp)) &&
                    hns_roce_qp_has_rq(init_attr)) {
                        ret = hns_roce_db_map_user(uctx, udata, ucmd.db_addr,
                                                   &hr_qp->rdb);
                        if (ret) {
                                dev_err(dev, "rq record doorbell map failed!\n");
                                goto err_sq_dbmap;
                        }

                        /* indicate kernel supports rq record db */
                        resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
                        hr_qp->rdb_en = 1;
                }
        } else {
                if (init_attr->create_flags &
                    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_rq_sge_list;
                }

                if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_rq_sge_list;
                }

                /* Set SQ size */
                ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
                                                  hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
                        goto err_rq_sge_list;
                }

                /* QP doorbell register address */
                hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
                hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;

                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    hns_roce_qp_has_rq(init_attr)) {
                        ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
                        if (ret) {
                                dev_err(dev, "rq record doorbell alloc failed!\n");
                                goto err_rq_sge_list;
                        }
                        *hr_qp->rdb.db_record = 0;
                        hr_qp->rdb_en = 1;
                }

                /* Allocate QP buf */
                page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
                if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
                                       (1 << page_shift) * 2,
                                       &hr_qp->hr_buf, page_shift)) {
                        dev_err(dev, "hns_roce_buf_alloc error!\n");
                        ret = -ENOMEM;
                        goto err_db;
                }

                hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
                /* Write MTT */
                ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
                                        hr_qp->hr_buf.page_shift, &hr_qp->mtt);
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
                        goto err_buf;
                }

                ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
                                             &hr_qp->hr_buf);
                if (ret) {
                        dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
                        goto err_mtt;
                }

                hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
                                         GFP_KERNEL);
                hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
                                         GFP_KERNEL);
                if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
                        ret = -ENOMEM;
                        goto err_wrid;
                }
        }

        if (sqpn) {
                qpn = sqpn;
        } else {
                /* Get QPN */
                ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
                if (ret) {
                        dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
                        goto err_wrid;
                }
        }

        if (init_attr->qp_type == IB_QPT_GSI &&
            hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
                /* In the v1 engine, the GSI QP context lives in the RoCE
                 * engine's registers rather than in the QPC table.
                 */
                ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        } else {
                ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        }

        if (sqpn)
                hr_qp->doorbell_qpn = 1;
        else
                hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

        if (udata) {
                ret = ib_copy_to_udata(udata, &resp,
                                       min(udata->outlen, sizeof(resp)));
                if (ret)
                        goto err_qp;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
                ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
                if (ret)
                        goto err_qp;
        }

        hr_qp->event = hns_roce_ib_qp_event;

        return 0;

err_qp:
        if (init_attr->qp_type == IB_QPT_GSI &&
                hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                hns_roce_qp_remove(hr_dev, hr_qp);
        else
                hns_roce_qp_free(hr_dev, hr_qp);

err_qpn:
        if (!sqpn)
                hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
        if (udata) {
                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    (udata->outlen >= sizeof(resp)) &&
                    hns_roce_qp_has_rq(init_attr))
                        hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
        } else {
                kfree(hr_qp->sq.wrid);
                kfree(hr_qp->rq.wrid);
        }

err_sq_dbmap:
        if (udata)
                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
                    (udata->inlen >= sizeof(ucmd)) &&
                    (udata->outlen >= sizeof(resp)) &&
                    hns_roce_qp_has_sq(init_attr))
                        hns_roce_db_unmap_user(uctx, &hr_qp->sdb);

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
        if (hr_qp->umem)
                ib_umem_release(hr_qp->umem);
        else
                hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_db:
        if (!udata && hns_roce_qp_has_rq(init_attr) &&
            (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
                hns_roce_free_db(hr_dev, &hr_qp->rdb);

err_rq_sge_list:
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
                kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
                kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
        return ret;
}

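/*
 * ib_create_qp entry point: dispatch on the QP type. RC QPs get a QPN from
 * the bitmap; GSI (special) QPs use a fixed QPN and may only be created from
 * the kernel.
 */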
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                                 struct ib_qp_init_attr *init_attr,
                                 struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct device *dev = hr_dev->dev;
        struct hns_roce_sqp *hr_sqp;
        struct hns_roce_qp *hr_qp;
        int ret;

        switch (init_attr->qp_type) {
        case IB_QPT_RC: {
                hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
                if (!hr_qp)
                        return ERR_PTR(-ENOMEM);

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
                                                hr_qp);
                if (ret) {
                        dev_err(dev, "Create RC QP failed\n");
                        kfree(hr_qp);
                        return ERR_PTR(ret);
                }

                hr_qp->ibqp.qp_num = hr_qp->qpn;

                break;
        }
        case IB_QPT_GSI: {
                /* Userspace is not allowed to create special QPs: */
                if (udata) {
                        dev_err(dev, "not support usr space GSI\n");
                        return ERR_PTR(-EINVAL);
                }

                hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
                if (!hr_sqp)
                        return ERR_PTR(-ENOMEM);

                hr_qp = &hr_sqp->hr_qp;
                hr_qp->port = init_attr->port_num - 1;
                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

                /* In hw v1 the GSI QPN is derived from the physical port;
                 * later hardware simply uses QP1.
                 */
                if (hr_dev->caps.max_sq_sg <= 2)
                        hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
                                             hr_dev->iboe.phy_port[hr_qp->port];
                else
                        hr_qp->ibqp.qp_num = 1;

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
                                                hr_qp->ibqp.qp_num, hr_qp);
                if (ret) {
                        dev_err(dev, "Create GSI QP failed!\n");
                        kfree(hr_sqp);
                        return ERR_PTR(ret);
                }

                break;
        }
        default:{
                dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
                return ERR_PTR(-EINVAL);
        }
        }

        return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);

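/* Map an IB QP type onto the hardware service type; -1 means unsupported. */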
int to_hr_qp_type(int qp_type)
{
        int transport_type;

        if (qp_type == IB_QPT_RC)
                transport_type = SERV_TYPE_RC;
        else if (qp_type == IB_QPT_UC)
                transport_type = SERV_TYPE_UC;
        else if (qp_type == IB_QPT_UD)
                transport_type = SERV_TYPE_UD;
        else if (qp_type == IB_QPT_GSI)
                transport_type = SERV_TYPE_UD;
        else
                transport_type = -1;

        return transport_type;
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);

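/*
 * ib_modify_qp entry point: validate the requested attributes and the state
 * transition under the QP mutex, then hand the transition to the hardware
 * specific modify_qp operation.
 */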
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct device *dev = hr_dev->dev;
        int ret = -EINVAL;
        int p;
        enum ib_mtu active_mtu;

        mutex_lock(&hr_qp->mutex);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
        new_state = attr_mask & IB_QP_STATE ?
                    attr->qp_state : cur_state;

        if (ibqp->uobject &&
            (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
                if (hr_qp->sdb_en == 1) {
                        hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

                        if (hr_qp->rdb_en == 1)
                                hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
                } else {
                        dev_warn(dev, "flush cqe is not supported in userspace!\n");
                        goto out;
                }
        }

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask)) {
                dev_err(dev, "ib_modify_qp_is_ok failed\n");
                goto out;
        }

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
                dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
                        attr->port_num);
                goto out;
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
                        dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
                                attr->pkey_index);
                        goto out;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

                if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
                    attr->path_mtu > IB_MTU_4096) ||
                    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
                    attr->path_mtu > IB_MTU_2048) ||
                    attr->path_mtu < IB_MTU_256 ||
                    attr->path_mtu > active_mtu) {
                        dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
                                attr->path_mtu);
                        goto out;
                }
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
                dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
                        attr->max_rd_atomic);
                goto out;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
                dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
                        attr->max_dest_rd_atomic);
                goto out;
        }

        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                if (hr_dev->caps.min_wqes) {
                        ret = -EPERM;
                        dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
                                new_state);
                } else {
                        ret = 0;
                }

                goto out;
        }

        ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
                                    new_state);

out:
        mutex_unlock(&hr_qp->mutex);

        return ret;
}

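/*
 * Lock the send and receive CQs of a QP in ascending CQN order (taking a
 * single lock when they are the same CQ) so that concurrent callers cannot
 * deadlock; hns_roce_unlock_cqs() releases them in the matching order.
 */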
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
                       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}
EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
                         __releases(&recv_cq->lock)
{
        if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);

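/*
 * Helpers returning a pointer to the n-th RQ WQE, SQ WQE or extended SGE
 * within the QP's WQE buffer.
 */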
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
        return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_recv_wqe);

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_send_wqe);

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
        return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
                                        (n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);

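/*
 * Check whether posting nreq more work requests would overflow the work
 * queue. The head/tail counters are re-read under the CQ lock to pick up
 * completions that may have freed slots in the meantime.
 */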
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
                          struct ib_cq *ib_cq)
{
        struct hns_roce_cq *hr_cq;
        u32 cur;

        cur = hr_wq->head - hr_wq->tail;
        if (likely(cur + nreq < hr_wq->max_post))
                return false;

        hr_cq = to_hr_cq(ib_cq);
        spin_lock(&hr_cq->lock);
        cur = hr_wq->head - hr_wq->tail;
        spin_unlock(&hr_cq->lock);

        return cur + nreq >= hr_wq->max_post;
}
EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);

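/*
 * Initialize the QP table at device setup: the QPN lookup xarray and the QPN
 * bitmap, with the special (SQP/reserved) QPNs excluded from allocation.
 */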
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        int reserved_from_top = 0;
        int reserved_from_bot;
        int ret;

        mutex_init(&qp_table->scc_mutex);
        xa_init(&hr_dev->qp_table_xa);

        /* In hw v1, each port includes two SQPs, so six ports need 12 in total */
        if (hr_dev->caps.max_sq_sg <= 2)
                reserved_from_bot = SQP_NUM;
        else
                reserved_from_bot = hr_dev->caps.reserved_qps;

        ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
                                   hr_dev->caps.num_qps - 1, reserved_from_bot,
                                   reserved_from_top);
        if (ret) {
                dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
                        ret);
                return ret;
        }

        return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}