drivers/infiniband/hw/hns/hns_roce_qp.c
1 /*
2  * Copyright (c) 2016 Hisilicon Limited.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #include <linux/pci.h>
35 #include <linux/platform_device.h>
36 #include <rdma/ib_addr.h>
37 #include <rdma/ib_umem.h>
38 #include <rdma/uverbs_ioctl.h>
39 #include "hns_roce_common.h"
40 #include "hns_roce_device.h"
41 #include "hns_roce_hem.h"
42
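/*
 * Flush handler scheduled by init_flush_work(): move the QP to the error
 * state so that outstanding WQEs are flushed as CQEs, then drop the
 * reference taken when the work was queued.
 */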
43 static void flush_work_handle(struct work_struct *work)
44 {
45         struct hns_roce_work *flush_work = container_of(work,
46                                         struct hns_roce_work, work);
47         struct hns_roce_qp *hr_qp = container_of(flush_work,
48                                         struct hns_roce_qp, flush_work);
49         struct device *dev = flush_work->hr_dev->dev;
50         struct ib_qp_attr attr;
51         int attr_mask;
52         int ret;
53
54         attr_mask = IB_QP_STATE;
55         attr.qp_state = IB_QPS_ERR;
56
57         if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
58                 ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
59                 if (ret)
60                         dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
61                                 ret);
62         }
63
64         /*
 65          * Make sure the QP destroy path is signalled that the flush has
 66          * completed, so that it can now safely proceed to destroy the QP.
67          */
68         if (atomic_dec_and_test(&hr_qp->refcount))
69                 complete(&hr_qp->free);
70 }
71
72 void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
73 {
74         struct hns_roce_work *flush_work = &hr_qp->flush_work;
75
76         flush_work->hr_dev = hr_dev;
77         INIT_WORK(&flush_work->work, flush_work_handle);
78         atomic_inc(&hr_qp->refcount);
79         queue_work(hr_dev->irq_workq, &flush_work->work);
80 }
81
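/*
 * Asynchronous event dispatch for a QP: look up the QP by QPN under the
 * xarray lock and take a reference so it cannot be freed underneath us. On
 * HIP08 and later, fatal WQ errors also mark the QP as being in error and
 * schedule the flush work before the event is forwarded to the QP owner.
 */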
82 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
83 {
84         struct device *dev = hr_dev->dev;
85         struct hns_roce_qp *qp;
86
87         xa_lock(&hr_dev->qp_table_xa);
88         qp = __hns_roce_qp_lookup(hr_dev, qpn);
89         if (qp)
90                 atomic_inc(&qp->refcount);
91         xa_unlock(&hr_dev->qp_table_xa);
92
93         if (!qp) {
94                 dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
95                 return;
96         }
97
98         if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
99             (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
100              event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
101              event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
102              event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
103              event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
104                 qp->state = IB_QPS_ERR;
105                 if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
106                         init_flush_work(hr_dev, qp);
107         }
108
109         qp->event(qp, (enum hns_roce_event)event_type);
110
111         if (atomic_dec_and_test(&qp->refcount))
112                 complete(&qp->free);
113 }
114
115 static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
116                                  enum hns_roce_event type)
117 {
118         struct ib_qp *ibqp = &hr_qp->ibqp;
119         struct ib_event event;
120
121         if (ibqp->event_handler) {
122                 event.device = ibqp->device;
123                 event.element.qp = ibqp;
124                 switch (type) {
125                 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
126                         event.event = IB_EVENT_PATH_MIG;
127                         break;
128                 case HNS_ROCE_EVENT_TYPE_COMM_EST:
129                         event.event = IB_EVENT_COMM_EST;
130                         break;
131                 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
132                         event.event = IB_EVENT_SQ_DRAINED;
133                         break;
134                 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
135                         event.event = IB_EVENT_QP_LAST_WQE_REACHED;
136                         break;
137                 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
138                         event.event = IB_EVENT_QP_FATAL;
139                         break;
140                 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
141                         event.event = IB_EVENT_PATH_MIG_ERR;
142                         break;
143                 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
144                         event.event = IB_EVENT_QP_REQ_ERR;
145                         break;
146                 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
147                 case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
148                 case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
149                         event.event = IB_EVENT_QP_ACCESS_ERR;
150                         break;
151                 default:
152                         dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
153                                 type, hr_qp->qpn);
154                         return;
155                 }
156                 ibqp->event_handler(&event, ibqp->qp_context);
157         }
158 }
159
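/*
 * QPNs are spread across HNS_ROCE_QP_BANK_NUM banks: the lower 3 bits of a
 * QPN select the bank and the remaining bits come from that bank's IDA.
 * New QPs are placed in the least loaded bank to balance allocation.
 */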
160 static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
161 {
162         u32 least_load = bank[0].inuse;
163         u8 bankid = 0;
164         u32 bankcnt;
165         u8 i;
166
167         for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
168                 bankcnt = bank[i].inuse;
169                 if (bankcnt < least_load) {
170                         least_load = bankcnt;
171                         bankid = i;
172                 }
173         }
174
175         return bankid;
176 }
177
178 static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
179                                  unsigned long *qpn)
180 {
181         int id;
182
183         id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
184         if (id < 0) {
185                 id = ida_alloc_range(&bank->ida, bank->min, bank->max,
186                                      GFP_KERNEL);
187                 if (id < 0)
188                         return id;
189         }
190
191         /* the QPN should keep increasing until the max value is reached. */
192         bank->next = (id + 1) > bank->max ? bank->min : id + 1;
193
194         /* the lower 3 bits are the bankid */
195         *qpn = (id << 3) | bankid;
196
197         return 0;
198 }

199 static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
200 {
201         struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
202         unsigned long num = 0;
203         u8 bankid;
204         int ret;
205
206         if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
207                 /* on hw v1, the special QPN (sqpn) is derived from the phy port */
208                 if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
209                         num = HNS_ROCE_MAX_PORTS +
210                               hr_dev->iboe.phy_port[hr_qp->port];
211                 else
212                         num = 1;
213
214                 hr_qp->doorbell_qpn = 1;
215         } else {
216                 mutex_lock(&qp_table->bank_mutex);
217                 bankid = get_least_load_bankid_for_qp(qp_table->bank);
218
219                 ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
220                                             &num);
221                 if (ret) {
222                         ibdev_err(&hr_dev->ib_dev,
223                                   "failed to alloc QPN, ret = %d\n", ret);
224                         mutex_unlock(&qp_table->bank_mutex);
225                         return ret;
226                 }
227
228                 qp_table->bank[bankid].inuse++;
229                 mutex_unlock(&qp_table->bank_mutex);
230
231                 hr_qp->doorbell_qpn = (u32)num;
232         }
233
234         hr_qp->qpn = num;
235
236         return 0;
237 }
238
239 enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
240 {
241         switch (state) {
242         case IB_QPS_RESET:
243                 return HNS_ROCE_QP_STATE_RST;
244         case IB_QPS_INIT:
245                 return HNS_ROCE_QP_STATE_INIT;
246         case IB_QPS_RTR:
247                 return HNS_ROCE_QP_STATE_RTR;
248         case IB_QPS_RTS:
249                 return HNS_ROCE_QP_STATE_RTS;
250         case IB_QPS_SQD:
251                 return HNS_ROCE_QP_STATE_SQD;
252         case IB_QPS_ERR:
253                 return HNS_ROCE_QP_STATE_ERR;
254         default:
255                 return HNS_ROCE_QP_NUM_STATE;
256         }
257 }
258
259 static void add_qp_to_list(struct hns_roce_dev *hr_dev,
260                            struct hns_roce_qp *hr_qp,
261                            struct ib_cq *send_cq, struct ib_cq *recv_cq)
262 {
263         struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
264         unsigned long flags;
265
266         hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
267         hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;
268
269         spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
270         hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
271
272         list_add_tail(&hr_qp->node, &hr_dev->qp_list);
273         if (hr_send_cq)
274                 list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
275         if (hr_recv_cq)
276                 list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);
277
278         hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
279         spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
280 }
281
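/*
 * Record the new QP in the device's QPN-indexed xarray (used for async
 * event lookup) and on the per-device and per-CQ lists that are walked when
 * completions have to be generated in software.
 */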
282 static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
283                              struct hns_roce_qp *hr_qp,
284                              struct ib_qp_init_attr *init_attr)
285 {
286         struct xarray *xa = &hr_dev->qp_table_xa;
287         int ret;
288
289         if (!hr_qp->qpn)
290                 return -EINVAL;
291
292         ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
293         if (ret)
294                 dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
295         else
296                 /* add the QP to the device's QP list for software CQE handling */
297                 add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
298                                init_attr->recv_cq);
299
300         return ret;
301 }
302
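/*
 * Reserve the per-QP context entries in the HEM tables: QPC and IRRL always,
 * TRRL and SCC context only when the device supports them.
 */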
303 static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
304 {
305         struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
306         struct device *dev = hr_dev->dev;
307         int ret;
308
309         if (!hr_qp->qpn)
310                 return -EINVAL;
311
312         /* In the v1 engine, the GSI QP context is kept in RoCE hardware registers */
313         if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
314             hr_dev->hw_rev == HNS_ROCE_HW_VER1)
315                 return 0;
316
317         /* Alloc memory for QPC */
318         ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
319         if (ret) {
320                 dev_err(dev, "Failed to get QPC table\n");
321                 goto err_out;
322         }
323
324         /* Alloc memory for IRRL */
325         ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
326         if (ret) {
327                 dev_err(dev, "Failed to get IRRL table\n");
328                 goto err_put_qp;
329         }
330
331         if (hr_dev->caps.trrl_entry_sz) {
332                 /* Alloc memory for TRRL */
333                 ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
334                                          hr_qp->qpn);
335                 if (ret) {
336                         dev_err(dev, "Failed to get TRRL table\n");
337                         goto err_put_irrl;
338                 }
339         }
340
341         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
342                 /* Alloc memory for SCC CTX */
343                 ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
344                                          hr_qp->qpn);
345                 if (ret) {
346                         dev_err(dev, "Failed to get SCC CTX table\n");
347                         goto err_put_trrl;
348                 }
349         }
350
351         return 0;
352
353 err_put_trrl:
354         if (hr_dev->caps.trrl_entry_sz)
355                 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
356
357 err_put_irrl:
358         hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
359
360 err_put_qp:
361         hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
362
363 err_out:
364         return ret;
365 }
366
367 void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
368 {
369         struct xarray *xa = &hr_dev->qp_table_xa;
370         unsigned long flags;
371
372         list_del(&hr_qp->node);
373
374         if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
375                 list_del(&hr_qp->sq_node);
376
377         if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
378             hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
379                 list_del(&hr_qp->rq_node);
380
381         xa_lock_irqsave(xa, flags);
382         __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
383         xa_unlock_irqrestore(xa, flags);
384 }
385
386 static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
387 {
388         struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
389
390         /* In the v1 engine, the GSI QP context is kept in RoCE hardware registers */
391         if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
392             hr_dev->hw_rev == HNS_ROCE_HW_VER1)
393                 return;
394
395         if (hr_dev->caps.trrl_entry_sz)
396                 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
397         hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
398 }
399
400 static inline u8 get_qp_bankid(unsigned long qpn)
401 {
402         /* The lower 3 bits of QPN are used to hash to different banks */
403         return (u8)(qpn & GENMASK(2, 0));
404 }
405
406 static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
407 {
408         u8 bankid;
409
410         if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
411                 return;
412
413         if (hr_qp->qpn < hr_dev->caps.reserved_qps)
414                 return;
415
416         bankid = get_qp_bankid(hr_qp->qpn);
417
418         ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
419
420         mutex_lock(&hr_dev->qp_table.bank_mutex);
421         hr_dev->qp_table.bank[bankid].inuse--;
422         mutex_unlock(&hr_dev->qp_table.bank_mutex);
423 }
424
425 static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
426                        bool user)
427 {
428         u32 max_sge = dev->caps.max_rq_sg;
429
430         if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
431                 return max_sge;
432
433         /* Reserve SGEs only for HIP08 in kernel mode. The userspace driver
434          * already accounts for the reserved SGE when calculating max_sge for
435          * the WQE buffer, so there is no need to do it again here. However,
436          * the result may exceed the SGE capacity reported by the firmware,
437          * so the kernel driver simply adapts the value accordingly.
438          */
439         if (user)
440                 max_sge = roundup_pow_of_two(max_sge + 1);
441         else
442                 hr_qp->rq.rsv_sge = 1;
443
444         return max_sge;
445 }
446
447 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
448                        struct hns_roce_qp *hr_qp, int has_rq, bool user)
449 {
450         u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
451         u32 cnt;
452
453         /* If an SRQ exists, zero all RQ-related fields */
454         if (!has_rq) {
455                 hr_qp->rq.wqe_cnt = 0;
456                 hr_qp->rq.max_gs = 0;
457                 hr_qp->rq_inl_buf.wqe_cnt = 0;
458                 cap->max_recv_wr = 0;
459                 cap->max_recv_sge = 0;
460
461                 return 0;
462         }
463
464         /* Check the requested RQ size against the device capabilities */
465         if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
466             cap->max_recv_sge > max_sge) {
467                 ibdev_err(&hr_dev->ib_dev,
468                           "RQ config error, depth = %u, sge = %u\n",
469                           cap->max_recv_wr, cap->max_recv_sge);
470                 return -EINVAL;
471         }
472
473         cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
474         if (cnt > hr_dev->caps.max_wqes) {
475                 ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
476                           cap->max_recv_wr);
477                 return -EINVAL;
478         }
479
480         hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
481                                               hr_qp->rq.rsv_sge);
482
483         if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
484                 hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
485         else
486                 hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
487                                             hr_qp->rq.max_gs);
488
489         hr_qp->rq.wqe_cnt = cnt;
490         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
491             hr_qp->ibqp.qp_type != IB_QPT_UD &&
492             hr_qp->ibqp.qp_type != IB_QPT_GSI)
493                 hr_qp->rq_inl_buf.wqe_cnt = cnt;
494         else
495                 hr_qp->rq_inl_buf.wqe_cnt = 0;
496
497         cap->max_recv_wr = cnt;
498         cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
499
500         return 0;
501 }
502
503 static u32 get_wqe_ext_sge_cnt(struct hns_roce_qp *qp)
504 {
505         /* GSI/UD QPs carry all of their SGEs in the extended SGE space */
506         if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD)
507                 return qp->sq.max_gs;
508
509         if (qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
510                 return qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE;
511
512         return 0;
513 }
514
515 static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
516                               struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
517 {
518         u32 total_sge_cnt;
519         u32 wqe_sge_cnt;
520
521         hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
522
523         if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
524                 hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE;
525                 return;
526         }
527
528         hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
529
530         wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);
531
532         /* If the number of extended SGEs is not zero, they must occupy at
533          * least HNS_HW_PAGE_SIZE of space.
534          */
535         if (wqe_sge_cnt) {
536                 total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt);
537                 hr_qp->sge.sge_cnt = max(total_sge_cnt,
538                                 (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
539         }
540 }
541
542 static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
543                                         struct ib_qp_cap *cap,
544                                         struct hns_roce_ib_create_qp *ucmd)
545 {
546         u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
547         u8 max_sq_stride = ilog2(roundup_sq_stride);
548
549         /* Sanity check SQ size before proceeding */
550         if (ucmd->log_sq_stride > max_sq_stride ||
551             ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
552                 ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
553                 return -EINVAL;
554         }
555
556         if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
557                 ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
558                           cap->max_send_sge);
559                 return -EINVAL;
560         }
561
562         return 0;
563 }
564
565 static int set_user_sq_size(struct hns_roce_dev *hr_dev,
566                             struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
567                             struct hns_roce_ib_create_qp *ucmd)
568 {
569         struct ib_device *ibdev = &hr_dev->ib_dev;
570         u32 cnt = 0;
571         int ret;
572
573         if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
574             cnt > hr_dev->caps.max_wqes)
575                 return -EINVAL;
576
577         ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
578         if (ret) {
579                 ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
580                           ret);
581                 return ret;
582         }
583
584         set_ext_sge_param(hr_dev, cnt, hr_qp, cap);
585
586         hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
587         hr_qp->sq.wqe_cnt = cnt;
588
589         return 0;
590 }
591
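/*
 * Describe the WQE buffer layout for the MTR: the SQ WQEs, the extended SGE
 * space and the RQ WQEs are laid out back to back, each forming one region
 * with its own hop number. Regions with a zero size are simply skipped.
 */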
592 static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
593                             struct hns_roce_qp *hr_qp,
594                             struct hns_roce_buf_attr *buf_attr)
595 {
596         int buf_size;
597         int idx = 0;
598
599         hr_qp->buff_size = 0;
600
601         /* SQ WQE */
602         hr_qp->sq.offset = 0;
603         buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
604                                           hr_qp->sq.wqe_shift);
605         if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
606                 buf_attr->region[idx].size = buf_size;
607                 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
608                 idx++;
609                 hr_qp->buff_size += buf_size;
610         }
611
612         /* extend SGE WQE in SQ */
613         hr_qp->sge.offset = hr_qp->buff_size;
614         buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
615                                           hr_qp->sge.sge_shift);
616         if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
617                 buf_attr->region[idx].size = buf_size;
618                 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
619                 idx++;
620                 hr_qp->buff_size += buf_size;
621         }
622
623         /* RQ WQE */
624         hr_qp->rq.offset = hr_qp->buff_size;
625         buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
626                                           hr_qp->rq.wqe_shift);
627         if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
628                 buf_attr->region[idx].size = buf_size;
629                 buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
630                 idx++;
631                 hr_qp->buff_size += buf_size;
632         }
633
634         if (hr_qp->buff_size < 1)
635                 return -EINVAL;
636
637         buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
638         buf_attr->region_count = idx;
639
640         return 0;
641 }
642
643 static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
644                               struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
645 {
646         struct ib_device *ibdev = &hr_dev->ib_dev;
647         u32 cnt;
648
649         if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
650             cap->max_send_sge > hr_dev->caps.max_sq_sg) {
651                 ibdev_err(ibdev,
652                           "failed to check SQ WR or SGE num, ret = %d.\n",
653                           -EINVAL);
654                 return -EINVAL;
655         }
656
657         cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
658         if (cnt > hr_dev->caps.max_wqes) {
659                 ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
660                           cnt);
661                 return -EINVAL;
662         }
663
664         hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
665         hr_qp->sq.wqe_cnt = cnt;
666
667         set_ext_sge_param(hr_dev, cnt, hr_qp, cap);
668
669         /* report the actual kernel SQ parameters back to the caller */
670         cap->max_send_wr = cnt;
671         cap->max_send_sge = hr_qp->sq.max_gs;
672
673         return 0;
674 }
675
676 static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
677 {
678         if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
679                 return 0;
680
681         return 1;
682 }
683
684 static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
685 {
686         if (attr->qp_type == IB_QPT_XRC_INI ||
687             attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
688             !attr->cap.max_recv_wr)
689                 return 0;
690
691         return 1;
692 }
693
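/*
 * For RQ inline, each receive WQE needs its own array of scatter entries.
 * A single contiguous buffer is allocated for all of them and then sliced
 * up, one max_recv_sge-sized chunk per WQE.
 */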
694 static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
695                                struct ib_qp_init_attr *init_attr)
696 {
697         u32 max_recv_sge = init_attr->cap.max_recv_sge;
698         u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
699         struct hns_roce_rinl_wqe *wqe_list;
700         int i;
701
702         /* allocate recv inline buf */
703         wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
704                            GFP_KERNEL);
705
706         if (!wqe_list)
707                 goto err;
708
709         /* Allocate one contiguous buffer for all the inline SGEs we need */
710         wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
711                                       sizeof(struct hns_roce_rinl_sge)),
712                                       GFP_KERNEL);
713         if (!wqe_list[0].sg_list)
714                 goto err_wqe_list;
715
716         /* Assign buffers of sg_list to each inline wqe */
717         for (i = 1; i < wqe_cnt; i++)
718                 wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];
719
720         hr_qp->rq_inl_buf.wqe_list = wqe_list;
721
722         return 0;
723
724 err_wqe_list:
725         kfree(wqe_list);
726
727 err:
728         return -ENOMEM;
729 }
730
731 static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
732 {
733         if (hr_qp->rq_inl_buf.wqe_list)
734                 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
735         kfree(hr_qp->rq_inl_buf.wqe_list);
736 }
737
738 static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
739                         struct ib_qp_init_attr *init_attr,
740                         struct ib_udata *udata, unsigned long addr)
741 {
742         struct ib_device *ibdev = &hr_dev->ib_dev;
743         struct hns_roce_buf_attr buf_attr = {};
744         int ret;
745
746         if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
747                 ret = alloc_rq_inline_buf(hr_qp, init_attr);
748                 if (ret) {
749                         ibdev_err(ibdev,
750                                   "failed to alloc inline buf, ret = %d.\n",
751                                   ret);
752                         return ret;
753                 }
754         } else {
755                 hr_qp->rq_inl_buf.wqe_list = NULL;
756         }
757
758         ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
759         if (ret) {
760                 ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
761                 goto err_inline;
762         }
763         ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
764                                   HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
765                                   udata, addr);
766         if (ret) {
767                 ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
768                 goto err_inline;
769         }
770
771         return 0;
772 err_inline:
773         free_rq_inline_buf(hr_qp);
774
775         return ret;
776 }
777
778 static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
779 {
780         hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
781         free_rq_inline_buf(hr_qp);
782 }
783
784 static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
785                                    struct ib_qp_init_attr *init_attr,
786                                    struct ib_udata *udata,
787                                    struct hns_roce_ib_create_qp_resp *resp,
788                                    struct hns_roce_ib_create_qp *ucmd)
789 {
790         return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
791                 udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
792                 hns_roce_qp_has_sq(init_attr) &&
793                 udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
794 }
795
796 static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
797                                    struct ib_qp_init_attr *init_attr,
798                                    struct ib_udata *udata,
799                                    struct hns_roce_ib_create_qp_resp *resp)
800 {
801         return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
802                 udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
803                 hns_roce_qp_has_rq(init_attr));
804 }
805
806 static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
807                                      struct ib_qp_init_attr *init_attr)
808 {
809         return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
810                 hns_roce_qp_has_rq(init_attr));
811 }
812
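/*
 * Set up the doorbells. User QPs map record doorbells from the addresses
 * passed in the create command when the device and command layout allow it;
 * kernel QPs use register-based doorbells (the DWQE area on HIP09 and
 * later) and may additionally get a kernel record doorbell for the RQ.
 */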
813 static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
814                        struct ib_qp_init_attr *init_attr,
815                        struct ib_udata *udata,
816                        struct hns_roce_ib_create_qp *ucmd,
817                        struct hns_roce_ib_create_qp_resp *resp)
818 {
819         struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
820                 udata, struct hns_roce_ucontext, ibucontext);
821         struct ib_device *ibdev = &hr_dev->ib_dev;
822         int ret;
823
824         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
825                 hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
826
827         if (udata) {
828                 if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
829                         ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
830                                                    &hr_qp->sdb);
831                         if (ret) {
832                                 ibdev_err(ibdev,
833                                           "failed to map user SQ doorbell, ret = %d.\n",
834                                           ret);
835                                 goto err_out;
836                         }
837                         hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
838                         resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
839                 }
840
841                 if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
842                         ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
843                                                    &hr_qp->rdb);
844                         if (ret) {
845                                 ibdev_err(ibdev,
846                                           "failed to map user RQ doorbell, ret = %d.\n",
847                                           ret);
848                                 goto err_sdb;
849                         }
850                         hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
851                         resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
852                 }
853         } else {
854                 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
855                         hr_qp->sq.db_reg = hr_dev->mem_base +
856                                            HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
857                 else
858                         hr_qp->sq.db_reg =
859                                 hr_dev->reg_base + hr_dev->sdb_offset +
860                                 DB_REG_OFFSET * hr_dev->priv_uar.index;
861
862                 hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
863                                    DB_REG_OFFSET * hr_dev->priv_uar.index;
864
865                 if (kernel_qp_has_rdb(hr_dev, init_attr)) {
866                         ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
867                         if (ret) {
868                                 ibdev_err(ibdev,
869                                           "failed to alloc kernel RQ doorbell, ret = %d.\n",
870                                           ret);
871                                 goto err_out;
872                         }
873                         *hr_qp->rdb.db_record = 0;
874                         hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
875                 }
876         }
877
878         return 0;
879 err_sdb:
880         if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
881                 hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
882 err_out:
883         return ret;
884 }
885
886 static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
887                        struct ib_udata *udata)
888 {
889         struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
890                 udata, struct hns_roce_ucontext, ibucontext);
891
892         if (udata) {
893                 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
894                         hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
895                 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
896                         hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
897         } else {
898                 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
899                         hns_roce_free_db(hr_dev, &hr_qp->rdb);
900         }
901 }
902
903 static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
904                              struct hns_roce_qp *hr_qp)
905 {
906         struct ib_device *ibdev = &hr_dev->ib_dev;
907         u64 *sq_wrid = NULL;
908         u64 *rq_wrid = NULL;
909         int ret;
910
911         sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
912         if (ZERO_OR_NULL_PTR(sq_wrid)) {
913                 ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
914                 return -ENOMEM;
915         }
916
917         if (hr_qp->rq.wqe_cnt) {
918                 rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
919                 if (ZERO_OR_NULL_PTR(rq_wrid)) {
920                         ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
921                         ret = -ENOMEM;
922                         goto err_sq;
923                 }
924         }
925
926         hr_qp->sq.wrid = sq_wrid;
927         hr_qp->rq.wrid = rq_wrid;
928         return 0;
929 err_sq:
930         kfree(sq_wrid);
931
932         return ret;
933 }
934
935 static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
936 {
937         kfree(hr_qp->rq.wrid);
938         kfree(hr_qp->sq.wrid);
939 }
940
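/*
 * Derive the QP parameters from the init attributes and, for user QPs, the
 * create command copied from udata: clamp the inline data size, size the RQ,
 * and size the SQ either from the user command or from the kernel caps.
 */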
941 static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
942                         struct ib_qp_init_attr *init_attr,
943                         struct ib_udata *udata,
944                         struct hns_roce_ib_create_qp *ucmd)
945 {
946         struct ib_device *ibdev = &hr_dev->ib_dev;
947         int ret;
948
949         hr_qp->ibqp.qp_type = init_attr->qp_type;
950
951         if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
952                 init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;
953
954         hr_qp->max_inline_data = init_attr->cap.max_inline_data;
955
956         if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
957                 hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
958         else
959                 hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
960
961         ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
962                           hns_roce_qp_has_rq(init_attr), !!udata);
963         if (ret) {
964                 ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
965                           ret);
966                 return ret;
967         }
968
969         if (udata) {
970                 ret = ib_copy_from_udata(ucmd, udata,
971                                          min(udata->inlen, sizeof(*ucmd)));
972                 if (ret) {
973                         ibdev_err(ibdev,
974                                   "failed to copy QP ucmd, ret = %d\n", ret);
975                         return ret;
976                 }
977
978                 ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
979                 if (ret)
980                         ibdev_err(ibdev,
981                                   "failed to set user SQ size, ret = %d.\n",
982                                   ret);
983         } else {
984                 ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
985                 if (ret)
986                         ibdev_err(ibdev,
987                                   "failed to set kernel SQ size, ret = %d.\n",
988                                   ret);
989         }
990
991         return ret;
992 }
993
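/*
 * Common QP creation path: validate and derive the parameters, then allocate
 * the WQE buffer, QPN, doorbells and context, publish the QP in the lookup
 * structures and finally report the capabilities back to userspace.
 */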
994 static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
995                                      struct ib_pd *ib_pd,
996                                      struct ib_qp_init_attr *init_attr,
997                                      struct ib_udata *udata,
998                                      struct hns_roce_qp *hr_qp)
999 {
1000         struct hns_roce_ib_create_qp_resp resp = {};
1001         struct ib_device *ibdev = &hr_dev->ib_dev;
1002         struct hns_roce_ib_create_qp ucmd;
1003         int ret;
1004
1005         mutex_init(&hr_qp->mutex);
1006         spin_lock_init(&hr_qp->sq.lock);
1007         spin_lock_init(&hr_qp->rq.lock);
1008
1009         hr_qp->state = IB_QPS_RESET;
1010         hr_qp->flush_flag = 0;
1011
1012         if (init_attr->create_flags)
1013                 return -EOPNOTSUPP;
1014
1015         ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
1016         if (ret) {
1017                 ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
1018                 return ret;
1019         }
1020
1021         if (!udata) {
1022                 ret = alloc_kernel_wrid(hr_dev, hr_qp);
1023                 if (ret) {
1024                         ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
1025                                   ret);
1026                         return ret;
1027                 }
1028         }
1029
1030         ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
1031         if (ret) {
1032                 ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
1033                 goto err_buf;
1034         }
1035
1036         ret = alloc_qpn(hr_dev, hr_qp);
1037         if (ret) {
1038                 ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
1039                 goto err_qpn;
1040         }
1041
1042         ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
1043         if (ret) {
1044                 ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
1045                           ret);
1046                 goto err_db;
1047         }
1048
1049         ret = alloc_qpc(hr_dev, hr_qp);
1050         if (ret) {
1051                 ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
1052                           ret);
1053                 goto err_qpc;
1054         }
1055
1056         ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
1057         if (ret) {
1058                 ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
1059                 goto err_store;
1060         }
1061
1062         if (udata) {
1063                 ret = ib_copy_to_udata(udata, &resp,
1064                                        min(udata->outlen, sizeof(resp)));
1065                 if (ret) {
1066                         ibdev_err(ibdev, "copy qp resp failed!\n");
1067                         goto err_store;
1068                 }
1069         }
1070
1071         if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
1072                 ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
1073                 if (ret)
1074                         goto err_flow_ctrl;
1075         }
1076
1077         hr_qp->ibqp.qp_num = hr_qp->qpn;
1078         hr_qp->event = hns_roce_ib_qp_event;
1079         atomic_set(&hr_qp->refcount, 1);
1080         init_completion(&hr_qp->free);
1081
1082         return 0;
1083
1084 err_flow_ctrl:
1085         hns_roce_qp_remove(hr_dev, hr_qp);
1086 err_store:
1087         free_qpc(hr_dev, hr_qp);
1088 err_qpc:
1089         free_qp_db(hr_dev, hr_qp, udata);
1090 err_db:
1091         free_qpn(hr_dev, hr_qp);
1092 err_qpn:
1093         free_qp_buf(hr_dev, hr_qp);
1094 err_buf:
1095         free_kernel_wrid(hr_qp);
1096         return ret;
1097 }
1098
1099 void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
1100                          struct ib_udata *udata)
1101 {
1102         if (atomic_dec_and_test(&hr_qp->refcount))
1103                 complete(&hr_qp->free);
1104         wait_for_completion(&hr_qp->free);
1105
1106         free_qpc(hr_dev, hr_qp);
1107         free_qpn(hr_dev, hr_qp);
1108         free_qp_buf(hr_dev, hr_qp);
1109         free_kernel_wrid(hr_qp);
1110         free_qp_db(hr_dev, hr_qp, udata);
1111
1112         kfree(hr_qp);
1113 }
1114
1115 static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
1116                          bool is_user)
1117 {
1118         switch (type) {
1119         case IB_QPT_XRC_INI:
1120         case IB_QPT_XRC_TGT:
1121                 if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
1122                         goto out;
1123                 break;
1124         case IB_QPT_UD:
1125                 if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
1126                     is_user)
1127                         goto out;
1128                 break;
1129         case IB_QPT_RC:
1130         case IB_QPT_GSI:
1131                 break;
1132         default:
1133                 goto out;
1134         }
1135
1136         return 0;
1137
1138 out:
1139         ibdev_err(&hr_dev->ib_dev, "QP type %d is not supported\n", type);
1140
1141         return -EOPNOTSUPP;
1142 }
1143
1144 struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
1145                                  struct ib_qp_init_attr *init_attr,
1146                                  struct ib_udata *udata)
1147 {
1148         struct ib_device *ibdev = pd ? pd->device : init_attr->xrcd->device;
1149         struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
1150         struct hns_roce_qp *hr_qp;
1151         int ret;
1152
1153         ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
1154         if (ret)
1155                 return ERR_PTR(ret);
1156
1157         hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
1158         if (!hr_qp)
1159                 return ERR_PTR(-ENOMEM);
1160
1161         if (init_attr->qp_type == IB_QPT_XRC_INI)
1162                 init_attr->recv_cq = NULL;
1163
1164         if (init_attr->qp_type == IB_QPT_XRC_TGT) {
1165                 hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
1166                 init_attr->recv_cq = NULL;
1167                 init_attr->send_cq = NULL;
1168         }
1169
1170         if (init_attr->qp_type == IB_QPT_GSI) {
1171                 hr_qp->port = init_attr->port_num - 1;
1172                 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
1173         }
1174
1175         ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
1176         if (ret) {
1177                 ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
1178                           init_attr->qp_type, ret);
1179
1180                 kfree(hr_qp);
1181                 return ERR_PTR(ret);
1182         }
1183
1184         return &hr_qp->ibqp;
1185 }
1186
1187 int to_hr_qp_type(int qp_type)
1188 {
1189         switch (qp_type) {
1190         case IB_QPT_RC:
1191                 return SERV_TYPE_RC;
1192         case IB_QPT_UD:
1193         case IB_QPT_GSI:
1194                 return SERV_TYPE_UD;
1195         case IB_QPT_XRC_INI:
1196         case IB_QPT_XRC_TGT:
1197                 return SERV_TYPE_XRC;
1198         default:
1199                 return -1;
1200         }
1201 }
1202
1203 static int check_mtu_validate(struct hns_roce_dev *hr_dev,
1204                               struct hns_roce_qp *hr_qp,
1205                               struct ib_qp_attr *attr, int attr_mask)
1206 {
1207         enum ib_mtu active_mtu;
1208         int p;
1209
1210         p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
1211         active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
1212
1213         if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
1214             attr->path_mtu > hr_dev->caps.max_mtu) ||
1215             attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
1216                 ibdev_err(&hr_dev->ib_dev,
1217                         "invalid path_mtu (%d) while modifying QP\n",
1218                         attr->path_mtu);
1219                 return -EINVAL;
1220         }
1221
1222         return 0;
1223 }
1224
1225 static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1226                                   int attr_mask)
1227 {
1228         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
1229         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
1230         int p;
1231
1232         if ((attr_mask & IB_QP_PORT) &&
1233             (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
1234                 ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
1235                           attr->port_num);
1236                 return -EINVAL;
1237         }
1238
1239         if (attr_mask & IB_QP_PKEY_INDEX) {
1240                 p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
1241                 if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
1242                         ibdev_err(&hr_dev->ib_dev,
1243                                   "invalid attr, pkey_index = %u.\n",
1244                                   attr->pkey_index);
1245                         return -EINVAL;
1246                 }
1247         }
1248
1249         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1250             attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
1251                 ibdev_err(&hr_dev->ib_dev,
1252                           "invalid attr, max_rd_atomic = %u.\n",
1253                           attr->max_rd_atomic);
1254                 return -EINVAL;
1255         }
1256
1257         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1258             attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
1259                 ibdev_err(&hr_dev->ib_dev,
1260                           "invalid attr, max_dest_rd_atomic = %u.\n",
1261                           attr->max_dest_rd_atomic);
1262                 return -EINVAL;
1263         }
1264
1265         if (attr_mask & IB_QP_PATH_MTU)
1266                 return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);
1267
1268         return 0;
1269 }
1270
1271 int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1272                        int attr_mask, struct ib_udata *udata)
1273 {
1274         struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
1275         struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
1276         enum ib_qp_state cur_state, new_state;
1277         int ret = -EINVAL;
1278
1279         mutex_lock(&hr_qp->mutex);
1280
1281         if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
1282                 goto out;
1283
1284         cur_state = hr_qp->state;
1285         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1286
1287         if (ibqp->uobject &&
1288             (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
1289                 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
1290                         hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
1291
1292                         if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
1293                                 hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
1294                 } else {
1295                         ibdev_warn(&hr_dev->ib_dev,
1296                                   "flush cqe is not supported in userspace!\n");
1297                         goto out;
1298                 }
1299         }
1300
1301         if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1302                                 attr_mask)) {
1303                 ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
1304                 goto out;
1305         }
1306
1307         ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
1308         if (ret)
1309                 goto out;
1310
1311         if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1312                 if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
1313                         ret = -EPERM;
1314                         ibdev_err(&hr_dev->ib_dev,
1315                                   "RST2RST state is not supported\n");
1316                 } else {
1317                         ret = 0;
1318                 }
1319
1320                 goto out;
1321         }
1322
1323         ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
1324                                     new_state);
1325
1326 out:
1327         mutex_unlock(&hr_qp->mutex);
1328
1329         return ret;
1330 }
1331
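/*
 * Lock the send and receive CQs of a QP together. When they are distinct,
 * the CQ with the lower CQN is always locked first so that concurrent
 * callers cannot deadlock on the pair.
 */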
1332 void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
1333                        __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1334 {
1335         if (unlikely(send_cq == NULL && recv_cq == NULL)) {
1336                 __acquire(&send_cq->lock);
1337                 __acquire(&recv_cq->lock);
1338         } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
1339                 spin_lock_irq(&send_cq->lock);
1340                 __acquire(&recv_cq->lock);
1341         } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
1342                 spin_lock_irq(&recv_cq->lock);
1343                 __acquire(&send_cq->lock);
1344         } else if (send_cq == recv_cq) {
1345                 spin_lock_irq(&send_cq->lock);
1346                 __acquire(&recv_cq->lock);
1347         } else if (send_cq->cqn < recv_cq->cqn) {
1348                 spin_lock_irq(&send_cq->lock);
1349                 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1350         } else {
1351                 spin_lock_irq(&recv_cq->lock);
1352                 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
1353         }
1354 }
1355
1356 void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
1357                          struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
1358                          __releases(&recv_cq->lock)
1359 {
1360         if (unlikely(send_cq == NULL && recv_cq == NULL)) {
1361                 __release(&recv_cq->lock);
1362                 __release(&send_cq->lock);
1363         } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
1364                 __release(&recv_cq->lock);
1365                 spin_unlock(&send_cq->lock);
1366         } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
1367                 __release(&send_cq->lock);
1368                 spin_unlock(&recv_cq->lock);
1369         } else if (send_cq == recv_cq) {
1370                 __release(&recv_cq->lock);
1371                 spin_unlock_irq(&send_cq->lock);
1372         } else if (send_cq->cqn < recv_cq->cqn) {
1373                 spin_unlock(&recv_cq->lock);
1374                 spin_unlock_irq(&send_cq->lock);
1375         } else {
1376                 spin_unlock(&send_cq->lock);
1377                 spin_unlock_irq(&recv_cq->lock);
1378         }
1379 }
1380
1381 static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
1382 {
1383         return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
1384 }
1385
1386 void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
1387 {
1388         return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
1389 }
1390
1391 void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
1392 {
1393         return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
1394 }
1395
1396 void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
1397 {
1398         return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
1399 }
1400
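/*
 * Check whether posting nreq more WQEs would overflow the work queue. The
 * head/tail distance is first sampled without the CQ lock; only on apparent
 * overflow is it re-read under the CQ lock to get a stable value.
 */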
1401 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
1402                           struct ib_cq *ib_cq)
1403 {
1404         struct hns_roce_cq *hr_cq;
1405         u32 cur;
1406
1407         cur = hr_wq->head - hr_wq->tail;
1408         if (likely(cur + nreq < hr_wq->wqe_cnt))
1409                 return false;
1410
1411         hr_cq = to_hr_cq(ib_cq);
1412         spin_lock(&hr_cq->lock);
1413         cur = hr_wq->head - hr_wq->tail;
1414         spin_unlock(&hr_cq->lock);
1415
1416         return cur + nreq >= hr_wq->wqe_cnt;
1417 }
1418
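/*
 * Initialize the QPN allocator: the reserved QPNs at the bottom of the range
 * are accounted to their banks and excluded from allocation by raising each
 * bank's minimum, and every bank gets its own IDA covering its share of
 * num_qps entries.
 */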
1419 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
1420 {
1421         struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
1422         unsigned int reserved_from_bot;
1423         unsigned int i;
1424
1425         mutex_init(&qp_table->scc_mutex);
1426         mutex_init(&qp_table->bank_mutex);
1427         xa_init(&hr_dev->qp_table_xa);
1428
1429         reserved_from_bot = hr_dev->caps.reserved_qps;
1430
1431         for (i = 0; i < reserved_from_bot; i++) {
1432                 hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
1433                 hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
1434         }
1435
1436         for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
1437                 ida_init(&hr_dev->qp_table.bank[i].ida);
1438                 hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
1439                                                HNS_ROCE_QP_BANK_NUM - 1;
1440                 hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
1441         }
1442
1443         return 0;
1444 }
1445
1446 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
1447 {
1448         int i;
1449
1450         for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
1451                 ida_destroy(&hr_dev->qp_table.bank[i].ida);
1452 }