/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
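
/*
 * CQE flush: when a QP has to be moved to the error state, a work item is
 * queued that calls hns_roce_modify_qp() so the hardware generates flush
 * CQEs for the outstanding WQEs. The work holds a QP reference taken in
 * init_flush_work() and drops it once the flush has been issued.
 */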
static void flush_work_handle(struct work_struct *work)
{
	struct hns_roce_work *flush_work = container_of(work,
					struct hns_roce_work, work);
	struct hns_roce_qp *hr_qp = container_of(flush_work,
					struct hns_roce_qp, flush_work);
	struct device *dev = flush_work->hr_dev->dev;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;

	if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
		if (ret)
			dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
				ret);
	}

	/*
	 * make sure we signal QP destroy leg that flush QP was completed
	 * so that it can safely proceed ahead now and destroy QP
	 */
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
}
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;

	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);
	atomic_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
}
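
/*
 * Dispatch an asynchronous event for the given QPN: look up the QP under the
 * xarray lock and take a reference, schedule a CQE flush for fatal WQ errors
 * on non-v1 hardware, then forward the event to the QP's event callback.
 */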
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);
	xa_unlock(&hr_dev->qp_table_xa);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
	    (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
		qp->state = IB_QPS_ERR;
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	unsigned long num = 0;
	int ret;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		/* when hw version is v1, the sqpn is allocated */
		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
			num = HNS_ROCE_MAX_PORTS +
			      hr_dev->iboe.phy_port[hr_qp->port];
		else
			num = 1;

		hr_qp->doorbell_qpn = 1;
	} else {
		ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap,
						  1, 1, &num);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n");
			return -ENOMEM;
		}

		hr_qp->doorbell_qpn = (u32)num;
	}

	hr_qp->qpn = num;

	return 0;
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
static void add_qp_to_list(struct hns_roce_dev *hr_dev,
			   struct hns_roce_qp *hr_qp,
			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
	unsigned long flags;

	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
	if (hr_send_cq)
		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
	if (hr_recv_cq)
		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}
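
/*
 * Publish the QP: store it in the qpn-indexed xarray used by event handling
 * and add it to the device and CQ lists that are walked when software CQEs
 * have to be generated.
 */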
static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct ib_qp_init_attr *init_attr)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
	if (ret)
		dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
	else
		/* add QP to device's QP list for softwc */
		add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
			       init_attr->recv_cq);

	return ret;
}
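
/*
 * Allocate the per-QP hardware context entries (QPC, IRRL and, when the
 * device supports them, TRRL and SCC context) from their HEM tables; entries
 * obtained earlier are released again on failure.
 */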
static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return 0;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get QPC table\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get IRRL table\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get TRRL table\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.sccc_entry_sz) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get SCC CTX table\n");
			goto err_put_trrl;
		}
	}

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	unsigned long flags;

	list_del(&hr_qp->node);
	list_del(&hr_qp->sq_node);
	list_del(&hr_qp->rq_node);

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	xa_unlock_irqrestore(xa, flags);
}
static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return;

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}
static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		return;

	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
}
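
/*
 * Size the receive queue from the caller's ib_qp_cap: round the requested
 * depth up to a power of two within the device limits and report the final
 * values back through @cap. A QP that uses an SRQ gets an empty RQ.
 */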
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
		       struct hns_roce_qp *hr_qp, int has_rq)
{
	u32 cnt;

	/* If srq exist, set zero for relative number of rq */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		hr_qp->rq_inl_buf.wqe_cnt = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;

		return 0;
	}

	/* Check the validity of QP support capacity */
	if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
			  cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
			  cap->max_recv_wr);
		return -EINVAL;
	}

	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
					      HNS_ROCE_RESERVED_SGE);

	if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
	else
		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
					    hr_qp->rq.max_gs);

	hr_qp->rq.wqe_cnt = cnt;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		hr_qp->rq_inl_buf.wqe_cnt = cnt;
	else
		hr_qp->rq_inl_buf.wqe_cnt = 0;

	cap->max_recv_wr = cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE;

	return 0;
}
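
/*
 * Work out how much extended SGE space the SQ needs: UD and GSI WQEs keep
 * all of their SGEs in the extended area, other QP types only spill the SGEs
 * beyond HNS_ROCE_SGE_IN_WQE, and HW v1 has no extended SGE area at all.
 */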
static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
				struct hns_roce_qp *hr_qp,
				struct ib_qp_cap *cap)
{
	u32 cnt;

	cnt = max(1U, cap->max_send_sge);
	if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		hr_qp->sq.max_gs = roundup_pow_of_two(cnt);
		hr_qp->sge.sge_cnt = 0;

		return 0;
	}

	hr_qp->sq.max_gs = cnt;

	/* UD sqwqe's sge use extend sge */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
	    hr_qp->ibqp.qp_type == IB_QPT_UD) {
		cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs);
	} else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) {
		cnt = roundup_pow_of_two(sq_wqe_cnt *
				(hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));
	} else {
		cnt = 0;
	}

	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
	hr_qp->sge.sge_cnt = cnt;

	return 0;
}
static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
					struct ib_qp_cap *cap,
					struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if (ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		ibdev_err(&hr_dev->ib_dev, "Failed to check SQ stride size\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(&hr_dev->ib_dev, "Failed to check SQ SGE size %d\n",
			  cap->max_send_sge);
		return -EINVAL;
	}

	return 0;
}
static int set_user_sq_size(struct hns_roce_dev *hr_dev,
			    struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt = 0;
	int ret;

	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > hr_dev->caps.max_wqes)
		return -EINVAL;

	ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
	if (ret)
		return ret;

	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
	hr_qp->sq.wqe_cnt = cnt;

	return 0;
}
static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_buf_attr *buf_attr)
{
	int buf_size;
	int idx = 0;

	hr_qp->buff_size = 0;

	/* SQ WQE */
	hr_qp->sq.offset = 0;
	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
					  hr_qp->sq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* extend SGE WQE in SQ */
	hr_qp->sge.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
					  hr_qp->sge.sge_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* RQ WQE */
	hr_qp->rq.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
					  hr_qp->rq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	if (hr_qp->buff_size < 1)
		return -EINVAL;

	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
	buf_attr->fixed_page = true;
	buf_attr->region_count = idx;

	return 0;
}
static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
			      struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt;
	int ret;

	if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		ibdev_err(ibdev,
			  "failed to check SQ WR, SGE or inline num, ret = %d.\n",
			  -EINVAL);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(ibdev, "failed to check WQE num, WQE num = %d.\n",
			  cnt);
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq.wqe_cnt = cnt;

	ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
	if (ret)
		return ret;

	/* sync the parameters of kernel QP to user's configuration */
	cap->max_send_wr = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}
static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
			       struct ib_qp_init_attr *init_attr)
{
	u32 max_recv_sge = init_attr->cap.max_recv_sge;
	u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
	struct hns_roce_rinl_wqe *wqe_list;
	int i;

	/* allocate recv inline buf */
	wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
			   GFP_KERNEL);
	if (!wqe_list)
		goto err;

	/* Allocate a continuous buffer for all inline sge we need */
	wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
				      sizeof(struct hns_roce_rinl_sge)),
				      GFP_KERNEL);
	if (!wqe_list[0].sg_list)
		goto err_wqe_list;

	/* Assign buffers of sg_list to each inline wqe */
	for (i = 1; i < wqe_cnt; i++)
		wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

	hr_qp->rq_inl_buf.wqe_list = wqe_list;

	return 0;

err_wqe_list:
	kfree(wqe_list);

err:
	return -ENOMEM;
}
static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
	if (hr_qp->rq_inl_buf.wqe_list)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
	kfree(hr_qp->rq_inl_buf.wqe_list);
}
static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
		ret = alloc_rq_inline_buf(hr_qp, init_attr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc inline buf, ret = %d.\n",
				  ret);
			return ret;
		}
	} else {
		hr_qp->rq_inl_buf.wqe_list = NULL;
	}

	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
		goto err_inline;
	}
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		goto err_inline;
	}

	return 0;

err_inline:
	free_rq_inline_buf(hr_qp);

	return ret;
}
static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
	free_rq_inline_buf(hr_qp);
}
static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp,
				   struct hns_roce_ib_create_qp *ucmd)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_sq(init_attr) &&
		udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		hns_roce_qp_has_rq(init_attr));
}
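
/*
 * Set up doorbells for the new QP: map the user-space record doorbells when
 * udata and the device capabilities allow it, otherwise fall back to the
 * kernel doorbell registers plus, if supported, a kernel RQ record doorbell.
 */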
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata,
		       struct hns_roce_ib_create_qp *ucmd,
		       struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (udata) {
		if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
						   &hr_qp->sdb);
			if (ret) {
				ibdev_err(ibdev,
					  "Failed to map user SQ doorbell\n");
				goto err_out;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
		}

		if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
						   &hr_qp->rdb);
			if (ret) {
				ibdev_err(ibdev,
					  "Failed to map user RQ doorbell\n");
				goto err_sdb;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	} else {
		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if (kernel_qp_has_rdb(hr_dev, init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				ibdev_err(ibdev,
					  "Failed to alloc kernel RQ doorbell\n");
				goto err_out;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	}

	return 0;

err_sdb:
	if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
	return ret;
}
static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
	} else {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}
}
static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 *sq_wrid = NULL;
	u64 *rq_wrid = NULL;
	int ret;

	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(sq_wrid)) {
		ibdev_err(ibdev, "Failed to alloc SQ wrid\n");
		return -ENOMEM;
	}

	if (hr_qp->rq.wqe_cnt) {
		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(rq_wrid)) {
			ibdev_err(ibdev, "Failed to alloc RQ wrid\n");
			ret = -ENOMEM;
			goto err_sq;
		}
	}

	hr_qp->sq.wrid = sq_wrid;
	hr_qp->rq.wrid = rq_wrid;
	return 0;

err_sq:
	kfree(sq_wrid);

	return ret;
}
static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
	kfree(hr_qp->rq.wrid);
	kfree(hr_qp->sq.wrid);
}
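
/*
 * Fill in the software QP attributes from ib_qp_init_attr and, for user QPs,
 * from the copied-in ucmd, validating and sizing both the SQ and the RQ.
 */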
static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata,
			struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
			  hns_roce_qp_has_rq(init_attr));
	if (ret) {
		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	if (udata) {
		if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) {
			ibdev_err(ibdev, "Failed to copy QP ucmd\n");
			return -EFAULT;
		}

		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret)
			ibdev_err(ibdev, "Failed to set user SQ size\n");
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			ibdev_err(ibdev, "Failed to check multicast loopback\n");
			return -EINVAL;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			ibdev_err(ibdev, "Failed to check ipoib ud lso\n");
			return -EINVAL;
		}

		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev, "Failed to set kernel SQ size\n");
	}

	return ret;
}
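
/*
 * Common QP creation path: set the parameters, allocate wrid arrays (kernel
 * QPs only), doorbells, the WQE buffer, a QPN and the QP context, then
 * publish the QP and return the response to user space. On failure the
 * resources are unwound in reverse order.
 */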
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata,
				     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_ib_create_qp_resp resp = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_qp ucmd;
	int ret;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;
	hr_qp->flush_flag = 0;

	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
	if (ret) {
		ibdev_err(ibdev, "Failed to set QP param\n");
		return ret;
	}

	if (!udata) {
		ret = alloc_kernel_wrid(hr_dev, hr_qp);
		if (ret) {
			ibdev_err(ibdev, "Failed to alloc wrid\n");
			return ret;
		}
	}

	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc QP doorbell\n");
		goto err_wrid;
	}

	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc QP buffer\n");
		goto err_db;
	}

	ret = alloc_qpn(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc QPN\n");
		goto err_buf;
	}

	ret = alloc_qpc(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc QP context\n");
		goto err_qpn;
	}

	ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "Failed to store QP\n");
		goto err_qpc;
	}

	if (udata) {
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret) {
			ibdev_err(ibdev, "Failed to copy QP resp\n");
			goto err_store;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
		if (ret)
			goto err_store;
	}

	hr_qp->ibqp.qp_num = hr_qp->qpn;
	hr_qp->event = hns_roce_ib_qp_event;
	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_store:
	hns_roce_qp_remove(hr_dev, hr_qp);
err_qpc:
	free_qpc(hr_dev, hr_qp);
err_qpn:
	free_qpn(hr_dev, hr_qp);
err_buf:
	free_qp_buf(hr_dev, hr_qp);
err_db:
	free_qp_db(hr_dev, hr_qp, udata);
err_wrid:
	free_kernel_wrid(hr_qp);
	return ret;
}
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata)
{
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	free_qpc(hr_dev, hr_qp);
	free_qpn(hr_dev, hr_qp);
	free_qp_buf(hr_dev, hr_qp);
	free_kernel_wrid(hr_qp);
	free_qp_db(hr_dev, hr_qp, udata);

	kfree(hr_qp);
}
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp);
		if (ret) {
			ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
				  hr_qp->qpn, ret);
			kfree(hr_qp);
			return ERR_PTR(ret);
		}
		break;
	case IB_QPT_GSI:
		/* Userspace is not allowed to create special QPs: */
		if (udata) {
			ibdev_err(ibdev, "GSI QP creation from user space is not supported\n");
			return ERR_PTR(-EINVAL);
		}

		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp);
		if (ret) {
			ibdev_err(ibdev, "Create GSI QP failed!\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}
		break;
	default:
		ibdev_err(ibdev, "QP type %d is not supported\n",
			  init_attr->qp_type);
		return ERR_PTR(-EOPNOTSUPP);
	}

	return &hr_qp->ibqp;
}
int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
static int check_mtu_validate(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_attr *attr, int attr_mask)
{
	enum ib_mtu active_mtu;
	int p;

	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
	active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

	if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
	    attr->path_mtu > hr_dev->caps.max_mtu) ||
	    attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "attr path_mtu (%d) invalid while modifying QP\n",
			  attr->path_mtu);
		return -EINVAL;
	}

	return 0;
}
static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int p;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		ibdev_err(&hr_dev->ib_dev,
			  "attr port_num invalid. attr->port_num = %d\n",
			  attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			ibdev_err(&hr_dev->ib_dev,
				  "attr pkey_index invalid. attr->pkey_index = %d\n",
				  attr->pkey_index);
			return -EINVAL;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "attr max_rd_atomic invalid. attr->max_rd_atomic = %d\n",
			  attr->max_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			  "attr max_dest_rd_atomic invalid. attr->max_dest_rd_atomic = %d\n",
			  attr->max_dest_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PATH_MTU)
		return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

	return 0;
}
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int ret = -EINVAL;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			ibdev_warn(&hr_dev->ib_dev,
				   "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
	if (ret)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
			ret = -EPERM;
			ibdev_err(&hr_dev->ib_dev,
				  "RST2RST state is not supported\n");
		} else {
			ret = 0;
		}

		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}
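
/*
 * Lock the send and receive CQs of a QP together. When the two CQs differ
 * they are always taken in CQN order so that concurrent callers cannot
 * deadlock; the __acquire()/__release() annotations keep sparse happy in the
 * NULL and shared-CQ cases.
 */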
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}
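
/*
 * Check whether posting @nreq more WQEs would overflow the work queue; the
 * head and tail indexes are re-read under the CQ lock before overflow is
 * reported.
 */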
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->wqe_cnt))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->wqe_cnt;
}
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int ret;

	mutex_init(&qp_table->scc_mutex);
	xa_init(&hr_dev->qp_table_xa);

	reserved_from_bot = hr_dev->caps.reserved_qps;

	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, reserved_from_bot,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed! error = %d\n",
			ret);
		return ret;
	}

	return 0;
}
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}