2 * Copyright (c) 2016-2017 Hisilicon Limited.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/acpi.h>
34 #include <linux/etherdevice.h>
35 #include <linux/interrupt.h>
36 #include <linux/iopoll.h>
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <net/addrconf.h>
40 #include <rdma/ib_addr.h>
41 #include <rdma/ib_cache.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/uverbs_ioctl.h>
46 #include "hns_roce_common.h"
47 #include "hns_roce_device.h"
48 #include "hns_roce_cmd.h"
49 #include "hns_roce_hem.h"
50 #include "hns_roce_hw_v2.h"
58 static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
61 dseg->lkey = cpu_to_le32(sg->lkey);
62 dseg->addr = cpu_to_le64(sg->addr);
63 dseg->len = cpu_to_le32(sg->length);
67 * mapped-value = 1 + real-value
 * The real value of an hns wr opcode starts from 0. In order to distinguish
 * initialized from uninitialized map entries, 1 is added to the real value
 * when the mapping is defined, so that a valid entry can be identified by
 * its mapped value being greater than 0.
73 #define HR_OPC_MAP(ib_key, hr_key) \
74 [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key
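/* For example, if IB_WR_RDMA_WRITE and HNS_ROCE_V2_WQE_OP_RDMA_WRITE are
 * both 0 (the values here are illustrative only), the table stores 0 + 1 = 1
 * and to_hr_opcode() returns 1 - 1 = 0, while an opcode that was never
 * mapped keeps the value 0 and is reported as HNS_ROCE_V2_WQE_OP_MASK.
 */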
76 static const u32 hns_roce_op_code[] = {
77 HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
78 HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
79 HR_OPC_MAP(SEND, SEND),
80 HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
81 HR_OPC_MAP(RDMA_READ, RDMA_READ),
82 HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
83 HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
84 HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
85 HR_OPC_MAP(LOCAL_INV, LOCAL_INV),
86 HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
87 HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
88 HR_OPC_MAP(REG_MR, FAST_REG_PMR),
91 static u32 to_hr_opcode(u32 ib_opcode)
93 if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
94 return HNS_ROCE_V2_WQE_OP_MASK;
96 return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
97 HNS_ROCE_V2_WQE_OP_MASK;
100 static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
101 const struct ib_reg_wr *wr)
103 struct hns_roce_wqe_frmr_seg *fseg =
104 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
105 struct hns_roce_mr *mr = to_hr_mr(wr->mr);
108 /* use ib_access_flags */
109 hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
110 hr_reg_write_bool(fseg, FRMR_ATOMIC,
111 wr->access & IB_ACCESS_REMOTE_ATOMIC);
112 hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
113 hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
114 hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);
/* The msg_len and inv_key fields of the WQE are reused below to carry the
 * PBL base address, which may be confusing.
 */
117 pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
118 rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
119 rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));
121 rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
122 rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
123 rc_sq_wqe->rkey = cpu_to_le32(wr->key);
124 rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
126 hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
127 hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
128 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
129 hr_reg_clear(fseg, FRMR_BLK_MODE);
132 static void set_atomic_seg(const struct ib_send_wr *wr,
133 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
134 unsigned int valid_num_sge)
136 struct hns_roce_v2_wqe_data_seg *dseg =
137 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
138 struct hns_roce_wqe_atomic_seg *aseg =
139 (void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);
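/* An atomic WQE is laid out as the RC send WQE header, followed by one data
 * segment describing the local buffer, followed by the atomic segment that
 * carries the operands.
 */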
141 set_data_seg_v2(dseg, wr->sg_list);
143 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
144 aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
145 aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
147 aseg->fetchadd_swap_data =
148 cpu_to_le64(atomic_wr(wr)->compare_add);
152 roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
153 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
156 static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
157 const struct ib_send_wr *wr,
158 unsigned int *sge_idx, u32 msg_len)
160 struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
161 unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
162 unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
163 unsigned int left_len_in_pg;
164 unsigned int idx = *sge_idx;
170 if (msg_len > ext_sge_sz) {
"not enough extended SGE space for inline data.\n");
176 dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
177 left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
178 len = wr->sg_list[0].length;
179 addr = (void *)(unsigned long)(wr->sg_list[0].addr);
/* When copying data to the extended SGE space, the length left in the
 * current page may not be enough for the user's SGE, so the data has to
 * be split into several parts: one in the first page and the others in
 * the subsequent pages.
187 if (len <= left_len_in_pg) {
188 memcpy(dseg, addr, len);
190 idx += len / dseg_len;
193 if (i >= wr->num_sge)
196 left_len_in_pg -= len;
197 len = wr->sg_list[i].length;
198 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
201 memcpy(dseg, addr, left_len_in_pg);
203 len -= left_len_in_pg;
204 addr += left_len_in_pg;
205 idx += left_len_in_pg / dseg_len;
206 dseg = hns_roce_get_extend_sge(qp,
207 idx & (qp->sge.sge_cnt - 1));
208 left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
217 static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
218 unsigned int *sge_ind, unsigned int cnt)
220 struct hns_roce_v2_wqe_data_seg *dseg;
221 unsigned int idx = *sge_ind;
224 dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
225 if (likely(sge->length)) {
226 set_data_seg_v2(dseg, sge);
236 static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
238 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
239 int mtu = ib_mtu_enum_to_int(qp->path_mtu);
241 if (len > qp->max_inline_data || len > mtu) {
242 ibdev_err(&hr_dev->ib_dev,
243 "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
244 len, qp->max_inline_data, mtu);
251 static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
252 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
253 unsigned int *sge_idx)
255 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
256 u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
257 struct ib_device *ibdev = &hr_dev->ib_dev;
258 unsigned int curr_idx = *sge_idx;
259 void *dseg = rc_sq_wqe;
263 if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
264 ibdev_err(ibdev, "invalid inline parameters!\n");
268 if (!check_inl_data_len(qp, msg_len))
271 dseg += sizeof(struct hns_roce_v2_rc_send_wqe);
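/* Payloads up to HNS_ROCE_V2_MAX_RC_INL_INN_SZ are copied directly after
 * the WQE header ("inner" inline); larger payloads are copied into the
 * extended SGE space instead and only flagged as inline here.
 */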
273 if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
274 roce_set_bit(rc_sq_wqe->byte_20,
275 V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);
277 for (i = 0; i < wr->num_sge; i++) {
278 memcpy(dseg, ((void *)wr->sg_list[i].addr),
279 wr->sg_list[i].length);
280 dseg += wr->sg_list[i].length;
283 roce_set_bit(rc_sq_wqe->byte_20,
284 V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1);
286 ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
290 roce_set_field(rc_sq_wqe->byte_16,
291 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
292 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
293 curr_idx - *sge_idx);
301 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
302 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
303 unsigned int *sge_ind,
304 unsigned int valid_num_sge)
306 struct hns_roce_v2_wqe_data_seg *dseg =
307 (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
308 struct hns_roce_qp *qp = to_hr_qp(ibqp);
312 roce_set_field(rc_sq_wqe->byte_20,
313 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
314 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
315 (*sge_ind) & (qp->sge.sge_cnt - 1));
317 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
318 !!(wr->send_flags & IB_SEND_INLINE));
319 if (wr->send_flags & IB_SEND_INLINE)
320 return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);
322 if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
323 for (i = 0; i < wr->num_sge; i++) {
324 if (likely(wr->sg_list[i].length)) {
325 set_data_seg_v2(dseg, wr->sg_list + i);
330 for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
331 if (likely(wr->sg_list[i].length)) {
332 set_data_seg_v2(dseg, wr->sg_list + i);
338 set_extend_sge(qp, wr->sg_list + i, sge_ind,
339 valid_num_sge - HNS_ROCE_SGE_IN_WQE);
342 roce_set_field(rc_sq_wqe->byte_16,
343 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
344 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
349 static int check_send_valid(struct hns_roce_dev *hr_dev,
350 struct hns_roce_qp *hr_qp)
352 struct ib_device *ibdev = &hr_dev->ib_dev;
353 struct ib_qp *ibqp = &hr_qp->ibqp;
355 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
356 ibqp->qp_type != IB_QPT_GSI &&
357 ibqp->qp_type != IB_QPT_UD)) {
ibdev_err(ibdev, "unsupported QP(0x%x) type!\n",
361 } else if (unlikely(hr_qp->state == IB_QPS_RESET ||
362 hr_qp->state == IB_QPS_INIT ||
363 hr_qp->state == IB_QPS_RTR)) {
364 ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
367 } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
368 ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
376 static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
377 unsigned int *sge_len)
379 unsigned int valid_num = 0;
380 unsigned int len = 0;
383 for (i = 0; i < wr->num_sge; i++) {
384 if (likely(wr->sg_list[i].length)) {
385 len += wr->sg_list[i].length;
394 static __le32 get_immtdata(const struct ib_send_wr *wr)
396 switch (wr->opcode) {
397 case IB_WR_SEND_WITH_IMM:
398 case IB_WR_RDMA_WRITE_WITH_IMM:
399 return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
405 static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
406 const struct ib_send_wr *wr)
408 u32 ib_op = wr->opcode;
410 if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
413 ud_sq_wqe->immtdata = get_immtdata(wr);
415 roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
416 V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
421 static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
422 struct hns_roce_ah *ah)
424 struct ib_device *ib_dev = ah->ibah.device;
425 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
427 roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
428 V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);
430 roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
431 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
432 roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
433 V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
434 roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
435 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);
437 if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
440 roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
441 V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);
443 ud_sq_wqe->sgid_index = ah->av.gid_index;
445 memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
446 memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);
448 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
451 roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
453 roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
454 V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);
459 static inline int set_ud_wqe(struct hns_roce_qp *qp,
460 const struct ib_send_wr *wr,
461 void *wqe, unsigned int *sge_idx,
462 unsigned int owner_bit)
464 struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
465 struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
466 unsigned int curr_idx = *sge_idx;
467 unsigned int valid_num_sge;
471 valid_num_sge = calc_wr_sge_num(wr, &msg_len);
473 ret = set_ud_opcode(ud_sq_wqe, wr);
477 ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
479 roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
480 !!(wr->send_flags & IB_SEND_SIGNALED));
482 roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
483 !!(wr->send_flags & IB_SEND_SOLICITED));
485 roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
486 V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);
488 roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
489 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
491 roce_set_field(ud_sq_wqe->byte_20,
492 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
493 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
494 curr_idx & (qp->sge.sge_cnt - 1));
496 ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
497 qp->qkey : ud_wr(wr)->remote_qkey);
498 roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
499 V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);
501 ret = fill_ud_av(ud_sq_wqe, ah);
505 qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;
507 set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);
* The pipeline can sequentially post all valid WQEs into the WQ buffer,
* including new WQEs waiting for the doorbell to update the PI again.
* Therefore, the owner bit of a WQE MUST be updated only after all other
* fields and extended SGEs have been written to DDR, not just to cache.
515 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
519 roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
525 static int set_rc_opcode(struct hns_roce_dev *hr_dev,
526 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
527 const struct ib_send_wr *wr)
529 u32 ib_op = wr->opcode;
532 rc_sq_wqe->immtdata = get_immtdata(wr);
535 case IB_WR_RDMA_READ:
536 case IB_WR_RDMA_WRITE:
537 case IB_WR_RDMA_WRITE_WITH_IMM:
538 rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
539 rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
542 case IB_WR_SEND_WITH_IMM:
544 case IB_WR_ATOMIC_CMP_AND_SWP:
545 case IB_WR_ATOMIC_FETCH_AND_ADD:
546 rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
547 rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
550 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
551 set_frmr_seg(rc_sq_wqe, reg_wr(wr));
555 case IB_WR_LOCAL_INV:
556 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
558 case IB_WR_SEND_WITH_INV:
559 rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
568 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
569 V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
573 static inline int set_rc_wqe(struct hns_roce_qp *qp,
574 const struct ib_send_wr *wr,
575 void *wqe, unsigned int *sge_idx,
576 unsigned int owner_bit)
578 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
579 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
580 unsigned int curr_idx = *sge_idx;
581 unsigned int valid_num_sge;
585 valid_num_sge = calc_wr_sge_num(wr, &msg_len);
587 rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
589 ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
593 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
594 (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
596 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
597 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
599 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
600 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
602 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
603 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
604 set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
605 else if (wr->opcode != IB_WR_REG_MR)
606 ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
607 &curr_idx, valid_num_sge);
* The pipeline can sequentially post all valid WQEs into the WQ buffer,
* including new WQEs waiting for the doorbell to update the PI again.
* Therefore, the owner bit of a WQE MUST be updated only after all other
* fields and extended SGEs have been written to DDR, not just to cache.
615 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
619 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
625 static inline void update_sq_db(struct hns_roce_dev *hr_dev,
626 struct hns_roce_qp *qp)
628 if (unlikely(qp->state == IB_QPS_ERR)) {
629 flush_cqe(hr_dev, qp);
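/* A QP in the error state must not ring the SQ doorbell; the flush CQE
 * mechanism above drives the producer index update instead.
 */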
631 struct hns_roce_v2_db sq_db = {};
633 hr_reg_write(&sq_db, DB_TAG, qp->doorbell_qpn);
634 hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
635 hr_reg_write(&sq_db, DB_PI, qp->sq.head);
636 hr_reg_write(&sq_db, DB_SL, qp->sl);
638 hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
642 static inline void update_rq_db(struct hns_roce_dev *hr_dev,
643 struct hns_roce_qp *qp)
645 if (unlikely(qp->state == IB_QPS_ERR)) {
646 flush_cqe(hr_dev, qp);
648 if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
650 qp->rq.head & V2_DB_PRODUCER_IDX_M;
652 struct hns_roce_v2_db rq_db = {};
654 hr_reg_write(&rq_db, DB_TAG, qp->qpn);
655 hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
656 hr_reg_write(&rq_db, DB_PI, qp->rq.head);
658 hns_roce_write64(hr_dev, (__le32 *)&rq_db,
664 static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
667 #define HNS_ROCE_WRITE_TIMES 8
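/* A Direct WQE is 64 bytes and is pushed to the device as eight
 * consecutive 64-bit relaxed writes.
 */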
668 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
669 struct hnae3_handle *handle = priv->handle;
670 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
673 if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
674 for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
675 writeq_relaxed(*(val + i), dest + i);
678 static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
681 #define HNS_ROCE_SL_SHIFT 2
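/* The service level is split across two header fields: DB_SL_L holds the
 * low bits and DB_SL_H the bits above HNS_ROCE_SL_SHIFT.
 */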
682 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
684 /* All kinds of DirectWQE have the same header field layout */
685 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FLAG_S, 1);
686 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
687 V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
688 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
689 V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S,
690 qp->sl >> HNS_ROCE_SL_SHIFT);
691 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
692 V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
694 hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
697 static int hns_roce_v2_post_send(struct ib_qp *ibqp,
698 const struct ib_send_wr *wr,
699 const struct ib_send_wr **bad_wr)
701 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
702 struct ib_device *ibdev = &hr_dev->ib_dev;
703 struct hns_roce_qp *qp = to_hr_qp(ibqp);
704 unsigned long flags = 0;
705 unsigned int owner_bit;
706 unsigned int sge_idx;
707 unsigned int wqe_idx;
712 spin_lock_irqsave(&qp->sq.lock, flags);
714 ret = check_send_valid(hr_dev, qp);
721 sge_idx = qp->next_sge;
723 for (nreq = 0; wr; ++nreq, wr = wr->next) {
724 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
730 wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
732 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
733 ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
734 wr->num_sge, qp->sq.max_gs);
740 wqe = hns_roce_get_send_wqe(qp, wqe_idx);
741 qp->sq.wrid[wqe_idx] = wr->wr_id;
743 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
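/* The owner bit is flipped every time the SQ wraps around, so WQEs left
 * over from the previous pass through the ring are never seen as valid.
 */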
/* Build the WQE differently according to the QP type */
746 if (ibqp->qp_type == IB_QPT_RC)
747 ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
749 ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
760 qp->next_sge = sge_idx;
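/* A single posted WQE can be pushed through the Direct WQE mechanism,
 * which avoids the separate doorbell write; otherwise the SQ doorbell is
 * rung below.
 */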
762 if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
763 write_dwqe(hr_dev, qp, wqe);
765 update_sq_db(hr_dev, qp);
768 spin_unlock_irqrestore(&qp->sq.lock, flags);
773 static int check_recv_valid(struct hns_roce_dev *hr_dev,
774 struct hns_roce_qp *hr_qp)
776 struct ib_device *ibdev = &hr_dev->ib_dev;
777 struct ib_qp *ibqp = &hr_qp->ibqp;
779 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
780 ibqp->qp_type != IB_QPT_GSI &&
781 ibqp->qp_type != IB_QPT_UD)) {
782 ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n",
787 if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
790 if (hr_qp->state == IB_QPS_RESET)
796 static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
797 u32 max_sge, bool rsv)
799 struct hns_roce_v2_wqe_data_seg *dseg = wqe;
802 for (i = 0, cnt = 0; i < wr->num_sge; i++) {
803 /* Skip zero-length sge */
804 if (!wr->sg_list[i].length)
806 set_data_seg_v2(dseg + cnt, wr->sg_list + i);
810 /* Fill a reserved sge to make hw stop reading remaining segments */
812 dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
814 dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
/* Clear the remaining segments so that ROCEE ignores the unused SGEs */
818 memset(dseg + cnt, 0,
819 (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
823 static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
824 u32 wqe_idx, u32 max_sge)
826 struct hns_roce_rinl_sge *sge_list;
830 wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
831 fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
/* The RQ supports inline data */
834 if (hr_qp->rq_inl_buf.wqe_cnt) {
835 sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
836 hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
837 for (i = 0; i < wr->num_sge; i++) {
838 sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
839 sge_list[i].len = wr->sg_list[i].length;
844 static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
845 const struct ib_recv_wr *wr,
846 const struct ib_recv_wr **bad_wr)
848 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
849 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
850 struct ib_device *ibdev = &hr_dev->ib_dev;
851 u32 wqe_idx, nreq, max_sge;
855 spin_lock_irqsave(&hr_qp->rq.lock, flags);
857 ret = check_recv_valid(hr_dev, hr_qp);
864 max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
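/* rsv_sge SGE slots per WQE are reserved for the terminating invalid SGE,
 * so a receive request may use at most max_gs - rsv_sge entries.
 */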
865 for (nreq = 0; wr; ++nreq, wr = wr->next) {
866 if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
867 hr_qp->ibqp.recv_cq))) {
873 if (unlikely(wr->num_sge > max_sge)) {
ibdev_err(ibdev, "num_sge = %d > max_sge = %u.\n",
875 wr->num_sge, max_sge);
881 wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
882 fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
883 hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
888 hr_qp->rq.head += nreq;
890 update_rq_db(hr_dev, hr_qp);
892 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
897 static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
899 return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
902 static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
904 return hns_roce_buf_offset(idx_que->mtr.kmem,
905 n << idx_que->entry_shift);
908 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
910 /* always called with interrupts disabled. */
911 spin_lock(&srq->lock);
913 bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
916 spin_unlock(&srq->lock);
919 static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
921 struct hns_roce_idx_que *idx_que = &srq->idx_que;
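/* head and tail are free-running counters, so their difference is the
 * number of WQE indexes currently outstanding towards the hardware.
 */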
923 return idx_que->head - idx_que->tail >= srq->wqe_cnt;
926 static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
927 const struct ib_recv_wr *wr)
929 struct ib_device *ib_dev = srq->ibsrq.device;
931 if (unlikely(wr->num_sge > max_sge)) {
933 "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
934 wr->num_sge, max_sge);
938 if (unlikely(hns_roce_srqwq_overflow(srq))) {
940 "failed to check srqwq status, srqwq is full.\n");
947 static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
949 struct hns_roce_idx_que *idx_que = &srq->idx_que;
952 pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
953 if (unlikely(pos == srq->wqe_cnt))
956 bitmap_set(idx_que->bitmap, pos, 1);
961 static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
963 struct hns_roce_idx_que *idx_que = &srq->idx_que;
967 head = idx_que->head & (srq->wqe_cnt - 1);
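/* The index queue tells the hardware which SRQ WQE slot to consume next;
 * the allocated wqe_idx is written at the current head of that ring.
 */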
969 buf = get_idx_buf(idx_que, head);
970 *buf = cpu_to_le32(wqe_idx);
975 static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
977 hr_reg_write(db, DB_TAG, srq->srqn);
978 hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
979 hr_reg_write(db, DB_PI, srq->idx_que.head);
982 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
983 const struct ib_recv_wr *wr,
984 const struct ib_recv_wr **bad_wr)
986 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
987 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
988 struct hns_roce_v2_db srq_db;
996 spin_lock_irqsave(&srq->lock, flags);
998 max_sge = srq->max_gs - srq->rsv_sge;
999 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1000 ret = check_post_srq_valid(srq, max_sge, wr);
1006 ret = get_srq_wqe_idx(srq, &wqe_idx);
1007 if (unlikely(ret)) {
1012 wqe = get_srq_wqe_buf(srq, wqe_idx);
1013 fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
1014 fill_wqe_idx(srq, wqe_idx);
1015 srq->wrid[wqe_idx] = wr->wr_id;
1019 update_srq_db(&srq_db, srq);
1021 hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
1024 spin_unlock_irqrestore(&srq->lock, flags);
1029 static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
1030 unsigned long instance_stage,
1031 unsigned long reset_stage)
/* When a hardware reset has completed at least once, we must stop sending
 * mailbox, CMQ and doorbell operations to the hardware. If we are currently
 * in the .init_instance() function, exit with an error. If we are at the
 * HNAE3_INIT_CLIENT stage of the soft reset process, also exit with an
 * error, so that the HNAE3_INIT_CLIENT handling can roll back its
 * operations (such as telling the hardware to free resources) and notify
 * the NIC driver to reschedule the soft reset process once again.
1042 hr_dev->is_reset = true;
1043 hr_dev->dis_db = true;
1045 if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
1046 instance_stage == HNS_ROCE_STATE_INIT)
1047 return CMD_RST_PRC_EBUSY;
1049 return CMD_RST_PRC_SUCCESS;
1052 static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
1053 unsigned long instance_stage,
1054 unsigned long reset_stage)
1056 #define HW_RESET_TIMEOUT_US 1000000
1057 #define HW_RESET_SLEEP_US 1000
1059 struct hns_roce_v2_priv *priv = hr_dev->priv;
1060 struct hnae3_handle *handle = priv->handle;
1061 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
/* When a hardware reset is detected, we must stop sending mailbox, CMQ and
 * doorbell operations to the hardware. If we are currently in the
 * .init_instance() function, exit with an error. If we are at the
 * HNAE3_INIT_CLIENT stage of the soft reset process, also exit with an
 * error, so that the HNAE3_INIT_CLIENT handling can roll back its
 * operations (such as telling the hardware to free resources) and notify
 * the NIC driver to reschedule the soft reset process once again.
1074 hr_dev->dis_db = true;
1076 ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
1077 val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
1078 HW_RESET_TIMEOUT_US, false, handle);
1080 hr_dev->is_reset = true;
1082 if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
1083 instance_stage == HNS_ROCE_STATE_INIT)
1084 return CMD_RST_PRC_EBUSY;
1086 return CMD_RST_PRC_SUCCESS;
1089 static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
1091 struct hns_roce_v2_priv *priv = hr_dev->priv;
1092 struct hnae3_handle *handle = priv->handle;
1093 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
/* When a software reset is detected in the .init_instance() function, we
 * must stop sending mailbox, CMQ and doorbell operations to the hardware and exit
1099 hr_dev->dis_db = true;
1100 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
1101 hr_dev->is_reset = true;
1103 return CMD_RST_PRC_EBUSY;
1106 static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
1107 struct hnae3_handle *handle)
1109 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1110 unsigned long instance_stage; /* the current instance stage */
1111 unsigned long reset_stage; /* the current reset stage */
1112 unsigned long reset_cnt;
/* Get reset information from the NIC driver or from the RoCE driver itself.
 * The meanings of the following variables obtained from the NIC driver are:
 * reset_cnt -- the number of completed hardware resets.
 * hw_resetting -- whether the hardware device is resetting now.
 * sw_resetting -- whether the NIC's software reset process is running now.
1123 instance_stage = handle->rinfo.instance_state;
1124 reset_stage = handle->rinfo.reset_state;
1125 reset_cnt = ops->ae_dev_reset_cnt(handle);
1126 if (reset_cnt != hr_dev->reset_cnt)
1127 return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
1130 hw_resetting = ops->get_cmdq_stat(handle);
1132 return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
1135 sw_resetting = ops->ae_dev_resetting(handle);
1136 if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
1137 return hns_roce_v2_cmd_sw_resetting(hr_dev);
1139 return CMD_RST_PRC_OTHERS;
1142 static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
1144 struct hns_roce_v2_priv *priv = hr_dev->priv;
1145 struct hnae3_handle *handle = priv->handle;
1146 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1148 if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
1151 if (ops->get_hw_reset_stat(handle))
1154 if (ops->ae_dev_resetting(handle))
1160 static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
1162 struct hns_roce_v2_priv *priv = hr_dev->priv;
1165 if (hr_dev->is_reset)
1166 status = CMD_RST_PRC_SUCCESS;
1168 status = check_aedev_reset_status(hr_dev, priv->handle);
1170 *busy = (status == CMD_RST_PRC_EBUSY);
1172 return status == CMD_RST_PRC_OTHERS;
1175 static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
1176 struct hns_roce_v2_cmq_ring *ring)
1178 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
1180 ring->desc = dma_alloc_coherent(hr_dev->dev, size,
1181 &ring->desc_dma_addr, GFP_KERNEL);
1188 static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
1189 struct hns_roce_v2_cmq_ring *ring)
1191 dma_free_coherent(hr_dev->dev,
1192 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
1193 ring->desc, ring->desc_dma_addr);
1195 ring->desc_dma_addr = 0;
1198 static int init_csq(struct hns_roce_dev *hr_dev,
1199 struct hns_roce_v2_cmq_ring *csq)
1204 csq->desc_num = CMD_CSQ_DESC_NUM;
1205 spin_lock_init(&csq->lock);
1206 csq->flag = TYPE_CSQ;
1209 ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
1213 dma = csq->desc_dma_addr;
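/* Program the CSQ base address (as low/high 32-bit halves) and its depth
 * into the ROCEE registers, then reset both ring pointers to zero.
 */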
1214 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
1215 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
1216 roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
1217 (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
1219 /* Make sure to write CI first and then PI */
1220 roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
1221 roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);
1226 static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
1228 struct hns_roce_v2_priv *priv = hr_dev->priv;
1231 priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
1233 ret = init_csq(hr_dev, &priv->cmq.csq);
1235 dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret);
1240 static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
1242 struct hns_roce_v2_priv *priv = hr_dev->priv;
1244 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
1247 static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
1248 enum hns_roce_opcode_type opcode,
1251 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
1252 desc->opcode = cpu_to_le16(opcode);
1253 desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
1255 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
1257 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1260 static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
1262 u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
1263 struct hns_roce_v2_priv *priv = hr_dev->priv;
1265 return tail == priv->cmq.csq.head;
1268 static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1269 struct hns_roce_cmq_desc *desc, int num)
1271 struct hns_roce_v2_priv *priv = hr_dev->priv;
1272 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
1279 spin_lock_bh(&csq->lock);
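/* Copy the descriptors into the CSQ ring and advance the head (producer
 * index); the hardware consumes them, and completion is detected by the CI
 * register catching up with the head.
 */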
1283 for (i = 0; i < num; i++) {
1284 csq->desc[csq->head++] = desc[i];
1285 if (csq->head == csq->desc_num)
1289 /* Write to hardware */
1290 roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);
1293 if (hns_roce_cmq_csq_done(hr_dev))
1296 } while (++timeout < priv->cmq.tx_timeout);
1298 if (hns_roce_cmq_csq_done(hr_dev)) {
1299 for (ret = 0, i = 0; i < num; i++) {
1300 /* check the result of hardware write back */
1301 desc[i] = csq->desc[tail++];
1302 if (tail == csq->desc_num)
1305 desc_ret = le16_to_cpu(desc[i].retval);
1306 if (likely(desc_ret == CMD_EXEC_SUCCESS))
1309 dev_err_ratelimited(hr_dev->dev,
1310 "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
1311 desc->opcode, desc_ret);
1315 /* FW/HW reset or incorrect number of desc */
1316 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
1317 dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
1324 spin_unlock_bh(&csq->lock);
1329 static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1330 struct hns_roce_cmq_desc *desc, int num)
1335 if (!v2_chk_mbox_is_avail(hr_dev, &busy))
1336 return busy ? -EBUSY : 0;
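/* Mailbox availability is checked again after sending: if a reset happened
 * in between, the command result cannot be trusted and is reported as busy
 * or ignored.
 */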
1338 ret = __hns_roce_cmq_send(hr_dev, desc, num);
1340 if (!v2_chk_mbox_is_avail(hr_dev, &busy))
1341 return busy ? -EBUSY : 0;
1347 static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
1348 dma_addr_t base_addr, u16 op)
1350 struct hns_roce_cmd_mailbox *mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
1354 return PTR_ERR(mbox);
1356 ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, obj, 0, op,
1357 HNS_ROCE_CMD_TIMEOUT_MSECS);
1358 hns_roce_free_cmd_mailbox(hr_dev, mbox);
1362 static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
1364 struct hns_roce_query_version *resp;
1365 struct hns_roce_cmq_desc desc;
1368 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
1369 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1373 resp = (struct hns_roce_query_version *)desc.data;
1374 hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
1375 hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1380 static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
1381 struct hnae3_handle *handle)
1383 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1386 hr_dev->dis_db = true;
1388 dev_warn(hr_dev->dev,
1389 "Func clear is pending, device in resetting state.\n");
1390 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1392 if (!ops->get_hw_reset_stat(handle)) {
1393 hr_dev->is_reset = true;
1394 dev_info(hr_dev->dev,
1395 "Func clear success after reset.\n");
1398 msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1399 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1402 dev_warn(hr_dev->dev, "Func clear failed.\n");
1405 static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
1406 struct hnae3_handle *handle)
1408 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1411 hr_dev->dis_db = true;
1413 dev_warn(hr_dev->dev,
1414 "Func clear is pending, device in resetting state.\n");
1415 end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1417 if (ops->ae_dev_reset_cnt(handle) !=
1418 hr_dev->reset_cnt) {
1419 hr_dev->is_reset = true;
1420 dev_info(hr_dev->dev,
1421 "Func clear success after sw reset\n");
1424 msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1425 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1428 dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
1431 static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
1434 struct hns_roce_v2_priv *priv = hr_dev->priv;
1435 struct hnae3_handle *handle = priv->handle;
1436 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1438 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
1439 hr_dev->dis_db = true;
1440 hr_dev->is_reset = true;
1441 dev_info(hr_dev->dev, "Func clear success after reset.\n");
1445 if (ops->get_hw_reset_stat(handle)) {
1446 func_clr_hw_resetting_state(hr_dev, handle);
1450 if (ops->ae_dev_resetting(handle) &&
1451 handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
1452 func_clr_sw_resetting_state(hr_dev, handle);
1456 if (retval && !flag)
1457 dev_warn(hr_dev->dev,
1458 "Func clear read failed, ret = %d.\n", retval);
1460 dev_warn(hr_dev->dev, "Func clear failed.\n");
1463 static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
1465 bool fclr_write_fail_flag = false;
1466 struct hns_roce_func_clear *resp;
1467 struct hns_roce_cmq_desc desc;
1471 if (check_device_is_in_reset(hr_dev))
1474 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
1475 resp = (struct hns_roce_func_clear *)desc.data;
1476 resp->rst_funcid_en = cpu_to_le32(vf_id);
1478 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1480 fclr_write_fail_flag = true;
1481 dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
1486 msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
1487 end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
1489 if (check_device_is_in_reset(hr_dev))
1491 msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
1492 end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
1494 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
1497 resp->rst_funcid_en = cpu_to_le32(vf_id);
1498 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1502 if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
1504 hr_dev->is_reset = true;
1510 hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
1513 static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
1515 enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
1516 struct hns_roce_cmq_desc desc[2];
1517 struct hns_roce_cmq_req *req_a;
1519 req_a = (struct hns_roce_cmq_req *)desc[0].data;
1520 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
1521 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1522 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
1523 hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
1524 hns_roce_cmq_send(hr_dev, desc, 2);
1527 static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
1531 for (i = hr_dev->func_num - 1; i >= 0; i--) {
1532 __hns_roce_function_clear(hr_dev, i);
1534 hns_roce_free_vf_resource(hr_dev, i);
1538 static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
1540 struct hns_roce_cmq_desc desc;
1543 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
1545 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1547 ibdev_err(&hr_dev->ib_dev,
1548 "failed to clear extended doorbell info, ret = %d.\n",
1554 static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1556 struct hns_roce_query_fw_info *resp;
1557 struct hns_roce_cmq_desc desc;
1560 hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1561 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1565 resp = (struct hns_roce_query_fw_info *)desc.data;
1566 hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
1571 static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
1573 struct hns_roce_cmq_desc desc;
1576 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
1577 hr_dev->func_num = 1;
1581 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
1583 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1585 hr_dev->func_num = 1;
1589 hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
1590 hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);
1595 static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
1597 struct hns_roce_cmq_desc desc;
1598 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1599 u32 clock_cycles_of_1us;
1601 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
1604 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
1605 clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
1607 clock_cycles_of_1us = HNS_ROCE_1US_CFG;
1609 hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
1610 hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
1612 return hns_roce_cmq_send(hr_dev, &desc, 1);
1615 static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1617 struct hns_roce_cmq_desc desc[2];
1618 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1619 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1620 struct hns_roce_caps *caps = &hr_dev->caps;
1621 enum hns_roce_opcode_type opcode;
1626 opcode = HNS_ROCE_OPC_QUERY_VF_RES;
1629 opcode = HNS_ROCE_OPC_QUERY_PF_RES;
1630 func_num = hr_dev->func_num;
1633 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
1634 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1635 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);
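/* The firmware reports totals shared by all functions, so each capability
 * below is divided by func_num to get this function's share (func_num is 1
 * for a VF).
 */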
1637 ret = hns_roce_cmq_send(hr_dev, desc, 2);
1641 caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
1642 caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
1643 caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
1644 caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
1645 caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
1646 caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
1647 caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
1648 caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;
1651 caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
1652 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
1655 caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
1656 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
1663 static int load_ext_cfg_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1665 struct hns_roce_cmq_desc desc;
1666 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1667 struct hns_roce_caps *caps = &hr_dev->caps;
1668 u32 func_num, qp_num;
1671 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, true);
1672 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1676 func_num = is_vf ? 1 : max_t(u32, 1, hr_dev->func_num);
1677 qp_num = hr_reg_read(req, EXT_CFG_QP_PI_NUM) / func_num;
1678 caps->num_pi_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
1680 qp_num = hr_reg_read(req, EXT_CFG_QP_NUM) / func_num;
1681 caps->num_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
1686 static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
1688 struct hns_roce_cmq_desc desc;
1689 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1690 struct hns_roce_caps *caps = &hr_dev->caps;
1693 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
1696 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1700 caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
1701 caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);
1706 static int query_func_resource_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1708 struct device *dev = hr_dev->dev;
1711 ret = load_func_res_caps(hr_dev, is_vf);
1713 dev_err(dev, "failed to load res caps, ret = %d (%s).\n", ret,
1714 is_vf ? "vf" : "pf");
1718 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1719 ret = load_ext_cfg_caps(hr_dev, is_vf);
1721 dev_err(dev, "failed to load ext cfg, ret = %d (%s).\n",
1722 ret, is_vf ? "vf" : "pf");
1728 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1730 struct device *dev = hr_dev->dev;
1733 ret = query_func_resource_caps(hr_dev, false);
1737 ret = load_pf_timer_res_caps(hr_dev);
1739 dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
1745 static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
1747 return query_func_resource_caps(hr_dev, true);
1750 static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
1753 struct hns_roce_vf_switch *swt;
1754 struct hns_roce_cmq_desc desc;
1757 swt = (struct hns_roce_vf_switch *)desc.data;
1758 hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
1759 swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
1760 roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
1761 VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
1762 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1766 desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN);
1767 desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1768 roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
1769 roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
1770 roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
1772 return hns_roce_cmq_send(hr_dev, &desc, 1);
1775 static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
1780 for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
1781 ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
1788 static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
1790 struct hns_roce_cmq_desc desc[2];
1791 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1792 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1793 enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
1794 struct hns_roce_caps *caps = &hr_dev->caps;
1796 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
1797 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1798 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
1800 hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);
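/* Every VF gets an equal, contiguous slice of each context BT; the slice
 * index is simply vf_id multiplied by the per-function count.
 */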
1802 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
1803 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
1804 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
1805 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
1806 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
1807 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
1808 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
1809 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
1810 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
1811 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
1812 hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
1813 hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
1814 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
1815 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);
1817 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1818 hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
1819 hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
1820 vf_id * caps->gmv_bt_num);
1822 hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
1823 hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
1824 vf_id * caps->sgid_bt_num);
1825 hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
1826 hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
1827 vf_id * caps->smac_bt_num);
1830 return hns_roce_cmq_send(hr_dev, desc, 2);
1833 static int config_vf_ext_resource(struct hns_roce_dev *hr_dev, u32 vf_id)
1835 struct hns_roce_cmq_desc desc;
1836 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1837 struct hns_roce_caps *caps = &hr_dev->caps;
1839 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, false);
1841 hr_reg_write(req, EXT_CFG_VF_ID, vf_id);
1843 hr_reg_write(req, EXT_CFG_QP_PI_NUM, caps->num_pi_qps);
1844 hr_reg_write(req, EXT_CFG_QP_PI_IDX, vf_id * caps->num_pi_qps);
1845 hr_reg_write(req, EXT_CFG_QP_NUM, caps->num_qps);
1846 hr_reg_write(req, EXT_CFG_QP_IDX, vf_id * caps->num_qps);
1848 return hns_roce_cmq_send(hr_dev, &desc, 1);
1851 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
1853 u32 func_num = max_t(u32, 1, hr_dev->func_num);
1857 for (vf_id = 0; vf_id < func_num; vf_id++) {
1858 ret = config_vf_hem_resource(hr_dev, vf_id);
1860 dev_err(hr_dev->dev,
1861 "failed to config vf-%u hem res, ret = %d.\n",
1866 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1867 ret = config_vf_ext_resource(hr_dev, vf_id);
1869 dev_err(hr_dev->dev,
1870 "failed to config vf-%u ext res, ret = %d.\n",
1880 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1882 struct hns_roce_cmq_desc desc;
1883 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1884 struct hns_roce_caps *caps = &hr_dev->caps;
1886 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1888 hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
1889 caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1890 hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
1891 caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1892 hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
1893 to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));
1895 hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
1896 caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1897 hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
1898 caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1899 hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
1900 to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));
1902 hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
1903 caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1904 hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
1905 caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1906 hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
1907 to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
1909 hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
1910 caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1911 hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
1912 caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1913 hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
1914 to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
1916 hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
1917 caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1918 hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
1919 caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1920 hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
1921 to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
1923 return hns_roce_cmq_send(hr_dev, &desc, 1);
/* Use the default caps when hns_roce_query_pf_caps() fails or when initializing a VF profile */
1927 static void set_default_caps(struct hns_roce_dev *hr_dev)
1929 struct hns_roce_caps *caps = &hr_dev->caps;
1931 caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
1932 caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
1933 caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
1934 caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
1935 caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
1936 caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
1937 caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1938 caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
1939 caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1941 caps->num_uars = HNS_ROCE_V2_UAR_NUM;
1942 caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
1943 caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
1944 caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1945 caps->num_comp_vectors = 0;
1947 caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
1948 caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
1949 caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
1950 caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
1952 caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1953 caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1954 caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1955 caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1956 caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1957 caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1958 caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
1959 caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
1960 caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
1961 caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1962 caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
1963 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1964 caps->reserved_lkey = 0;
1965 caps->reserved_pds = 0;
1966 caps->reserved_mrws = 1;
1967 caps->reserved_uars = 0;
1968 caps->reserved_cqs = 0;
1969 caps->reserved_srqs = 0;
1970 caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
1972 caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1973 caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1974 caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1975 caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1976 caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
1978 caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
1979 caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
1980 caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
1981 caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
1982 caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
1983 caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
1984 caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
1985 caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1987 caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
1988 HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1989 HNS_ROCE_CAP_FLAG_CQ_RECORD_DB |
1990 HNS_ROCE_CAP_FLAG_QP_RECORD_DB;
1992 caps->pkey_table_len[0] = 1;
1993 caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
1994 caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
1995 caps->local_ca_ack_delay = 0;
1996 caps->max_mtu = IB_MTU_4096;
1998 caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
1999 caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
2001 caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
2002 HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
2003 HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;
2005 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
2007 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2008 caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
2009 HNS_ROCE_CAP_FLAG_DIRECT_WQE;
2010 caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
2012 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
/* The following configurations are only valid for HIP08 */
2015 caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
2016 caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
2017 caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
2021 static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
2022 u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
2025 u64 bt_chunk_size = PAGE_SIZE;
2026 u64 buf_chunk_size = PAGE_SIZE;
2027 u64 obj_per_chunk_default = buf_chunk_size / obj_size;
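/* hop_num is the number of base-address indirection levels; each extra
 * level multiplies the number of objects one context BT entry can cover
 * by bt_chunk_size / BA_BYTE_LEN.
 */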
2034 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2035 (bt_chunk_size / BA_BYTE_LEN) *
2036 (bt_chunk_size / BA_BYTE_LEN) *
2037 obj_per_chunk_default;
2040 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2041 (bt_chunk_size / BA_BYTE_LEN) *
2042 obj_per_chunk_default;
2045 obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2046 obj_per_chunk_default;
2048 case HNS_ROCE_HOP_NUM_0:
2049 obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
pr_err("table %u does not support hop_num = %u!\n", hem_type,
2057 if (hem_type >= HEM_TYPE_MTT)
2058 *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
2060 *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
2063 static void set_hem_page_size(struct hns_roce_dev *hr_dev)
2065 struct hns_roce_caps *caps = &hr_dev->caps;
2068 caps->eqe_ba_pg_sz = 0;
2069 caps->eqe_buf_pg_sz = 0;
2072 caps->llm_buf_pg_sz = 0;
2075 caps->mpt_ba_pg_sz = 0;
2076 caps->mpt_buf_pg_sz = 0;
2077 caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
2078 caps->pbl_buf_pg_sz = 0;
2079 calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
2080 caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
2084 caps->qpc_ba_pg_sz = 0;
2085 caps->qpc_buf_pg_sz = 0;
2086 caps->qpc_timer_ba_pg_sz = 0;
2087 caps->qpc_timer_buf_pg_sz = 0;
2088 caps->sccc_ba_pg_sz = 0;
2089 caps->sccc_buf_pg_sz = 0;
2090 caps->mtt_ba_pg_sz = 0;
2091 caps->mtt_buf_pg_sz = 0;
2092 calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
2093 caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
2096 if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
2097 calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
2098 caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
2099 &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
2102 caps->cqc_ba_pg_sz = 0;
2103 caps->cqc_buf_pg_sz = 0;
2104 caps->cqc_timer_ba_pg_sz = 0;
2105 caps->cqc_timer_buf_pg_sz = 0;
2106 caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
2107 caps->cqe_buf_pg_sz = 0;
2108 calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
2109 caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
2111 calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
2112 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
2115 if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
2116 caps->srqc_ba_pg_sz = 0;
2117 caps->srqc_buf_pg_sz = 0;
2118 caps->srqwqe_ba_pg_sz = 0;
2119 caps->srqwqe_buf_pg_sz = 0;
2120 caps->idx_ba_pg_sz = 0;
2121 caps->idx_buf_pg_sz = 0;
2122 calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
2123 caps->srqc_hop_num, caps->srqc_bt_num,
2124 &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
2126 calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
2127 caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
2128 &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
2129 calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
2130 caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
2131 &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
2135 caps->gmv_ba_pg_sz = 0;
2136 caps->gmv_buf_pg_sz = 0;
2139 /* Apply all loaded caps before writing them to hardware */
2140 static void apply_func_caps(struct hns_roce_dev *hr_dev)
2142 struct hns_roce_caps *caps = &hr_dev->caps;
2143 struct hns_roce_v2_priv *priv = hr_dev->priv;
2145 /* The following configurations do not need to be queried from firmware. */
2146 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
2147 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
2148 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
2150 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
2151 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2152 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2154 caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
2155 caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
2157 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
2158 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
2159 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
2161 if (!caps->num_comp_vectors)
2162 caps->num_comp_vectors = min_t(u32, caps->eqc_bt_num - 1,
2163 (u32)priv->handle->rinfo.num_vectors - 2);
2165 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2166 caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
2167 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
2168 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
2170 /* The following configurations will be overwritten */
2171 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
2172 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
2173 caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
2175 /* The following configurations are not obtained from firmware */
2176 caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
2178 caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
2179 caps->gid_table_len[0] = caps->gmv_bt_num *
2180 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
2182 caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
2183 caps->gmv_entry_sz);
2185 u32 func_num = max_t(u32, 1, hr_dev->func_num);
2187 caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
2188 caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
2189 caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
2190 caps->gid_table_len[0] /= func_num;
2193 if (hr_dev->is_vf) {
2194 caps->default_aeq_arm_st = 0x3;
2195 caps->default_ceq_arm_st = 0x3;
2196 caps->default_ceq_max_cnt = 0x1;
2197 caps->default_ceq_period = 0x10;
2198 caps->default_aeq_max_cnt = 0x1;
2199 caps->default_aeq_period = 0x10;
2202 set_hem_page_size(hr_dev);
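/*
 * Query the PF capabilities from firmware: send a chain of
 * HNS_ROCE_QUERY_PF_CAPS_CMD_NUM linked CMQ descriptors and translate the
 * five response blocks (a..e) into hr_dev->caps, including entry sizes,
 * resource counts, reserved resources and the context/PBL hop numbers.
 */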
2205 static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
2207 struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
2208 struct hns_roce_caps *caps = &hr_dev->caps;
2209 struct hns_roce_query_pf_caps_a *resp_a;
2210 struct hns_roce_query_pf_caps_b *resp_b;
2211 struct hns_roce_query_pf_caps_c *resp_c;
2212 struct hns_roce_query_pf_caps_d *resp_d;
2213 struct hns_roce_query_pf_caps_e *resp_e;
2219 for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
2220 hns_roce_cmq_setup_basic_desc(&desc[i],
2221 HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
2223 if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
2224 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2226 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2229 ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
2233 resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
2234 resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
2235 resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
2236 resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
2237 resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
2239 caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
2240 caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
2241 caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
2242 caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
2243 caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
2244 caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
2245 caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
2246 caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
2247 caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
2248 caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
2249 caps->num_aeq_vectors = resp_a->num_aeq_vectors;
2250 caps->num_other_vectors = resp_a->num_other_vectors;
2251 caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
2252 caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
2253 caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
2254 caps->cqe_sz = resp_a->cqe_sz;
2256 caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
2257 caps->irrl_entry_sz = resp_b->irrl_entry_sz;
2258 caps->trrl_entry_sz = resp_b->trrl_entry_sz;
2259 caps->cqc_entry_sz = resp_b->cqc_entry_sz;
2260 caps->srqc_entry_sz = resp_b->srqc_entry_sz;
2261 caps->idx_entry_sz = resp_b->idx_entry_sz;
2262 caps->sccc_sz = resp_b->sccc_sz;
2263 caps->max_mtu = resp_b->max_mtu;
2264 caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz);
2265 caps->min_cqes = resp_b->min_cqes;
2266 caps->min_wqes = resp_b->min_wqes;
2267 caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
2268 caps->pkey_table_len[0] = resp_b->pkey_table_len;
2269 caps->phy_num_uars = resp_b->phy_num_uars;
2270 ctx_hop_num = resp_b->ctx_hop_num;
2271 pbl_hop_num = resp_b->pbl_hop_num;
2273 caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
2274 V2_QUERY_PF_CAPS_C_NUM_PDS_M,
2275 V2_QUERY_PF_CAPS_C_NUM_PDS_S);
2276 caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
2277 V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
2278 V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
2279 caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
2280 HNS_ROCE_CAP_FLAGS_EX_SHIFT;
2282 caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
2283 V2_QUERY_PF_CAPS_C_NUM_CQS_M,
2284 V2_QUERY_PF_CAPS_C_NUM_CQS_S);
2285 caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
2286 V2_QUERY_PF_CAPS_C_MAX_GID_M,
2287 V2_QUERY_PF_CAPS_C_MAX_GID_S);
2289 caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
2290 V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
2291 V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
2292 caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
2293 V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
2294 V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
2295 caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
2296 V2_QUERY_PF_CAPS_C_NUM_QPS_M,
2297 V2_QUERY_PF_CAPS_C_NUM_QPS_S);
2298 caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
2299 V2_QUERY_PF_CAPS_C_MAX_ORD_M,
2300 V2_QUERY_PF_CAPS_C_MAX_ORD_S);
2301 caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
2302 caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
2303 caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
2304 V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
2305 V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
2306 caps->cong_type = roce_get_field(resp_d->wq_hop_num_max_srqs,
2307 V2_QUERY_PF_CAPS_D_CONG_TYPE_M,
2308 V2_QUERY_PF_CAPS_D_CONG_TYPE_S);
2309 caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
2311 caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
2312 V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
2313 V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
2314 caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
2315 V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
2316 V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
2318 caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
2319 V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
2320 V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
2321 caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
2322 V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
2323 V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
2324 caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
2325 V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
2326 V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
2327 caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
2328 V2_QUERY_PF_CAPS_D_RSV_PDS_M,
2329 V2_QUERY_PF_CAPS_D_RSV_PDS_S);
2330 caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
2331 V2_QUERY_PF_CAPS_D_NUM_UARS_M,
2332 V2_QUERY_PF_CAPS_D_NUM_UARS_S);
2333 caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
2334 V2_QUERY_PF_CAPS_D_RSV_QPS_M,
2335 V2_QUERY_PF_CAPS_D_RSV_QPS_S);
2336 caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
2337 V2_QUERY_PF_CAPS_D_RSV_UARS_M,
2338 V2_QUERY_PF_CAPS_D_RSV_UARS_S);
2339 caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
2340 V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
2341 V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
2342 caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
2343 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
2344 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
2345 caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
2346 V2_QUERY_PF_CAPS_E_RSV_CQS_M,
2347 V2_QUERY_PF_CAPS_E_RSV_CQS_S);
2348 caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
2349 V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
2350 V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
2351 caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
2352 V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
2353 V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
2354 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
2355 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
2356 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
2357 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
2359 caps->qpc_hop_num = ctx_hop_num;
2360 caps->sccc_hop_num = ctx_hop_num;
2361 caps->srqc_hop_num = ctx_hop_num;
2362 caps->cqc_hop_num = ctx_hop_num;
2363 caps->mpt_hop_num = ctx_hop_num;
2364 caps->mtt_hop_num = pbl_hop_num;
2365 caps->cqe_hop_num = pbl_hop_num;
2366 caps->srqwqe_hop_num = pbl_hop_num;
2367 caps->idx_hop_num = pbl_hop_num;
2368 caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2369 V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
2370 V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
2371 caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2372 V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
2373 V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
2374 caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2375 V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
2376 V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
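/*
 * Tell the hardware the size of a context entry (e.g. QPC or SCCC) through
 * the HNS_ROCE_OPC_CFG_ENTRY_SIZE command.
 */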
2381 static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
2383 struct hns_roce_cmq_desc desc;
2384 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
2386 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
2389 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
2390 hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);
2392 return hns_roce_cmq_send(hr_dev, &desc, 1);
2395 static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
2397 struct hns_roce_caps *caps = &hr_dev->caps;
2400 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
2403 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
2406 dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
2410 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
2413 dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
2418 static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
2420 struct device *dev = hr_dev->dev;
2423 hr_dev->func_num = 1;
2425 set_default_caps(hr_dev);
2427 ret = hns_roce_query_vf_resource(hr_dev);
2429 dev_err(dev, "failed to query VF resource, ret = %d.\n", ret);
2433 apply_func_caps(hr_dev);
2435 ret = hns_roce_v2_set_bt(hr_dev);
2437 dev_err(dev, "failed to config VF BA table, ret = %d.\n", ret);
2442 static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev)
2444 struct device *dev = hr_dev->dev;
2447 ret = hns_roce_query_func_info(hr_dev);
2449 dev_err(dev, "failed to query func info, ret = %d.\n", ret);
2453 ret = hns_roce_config_global_param(hr_dev);
2455 dev_err(dev, "failed to config global param, ret = %d.\n", ret);
2459 ret = hns_roce_set_vf_switch_param(hr_dev);
2461 dev_err(dev, "failed to set switch param, ret = %d.\n", ret);
2465 ret = hns_roce_query_pf_caps(hr_dev);
2467 set_default_caps(hr_dev);
2469 ret = hns_roce_query_pf_resource(hr_dev);
2471 dev_err(dev, "failed to query pf resource, ret = %d.\n", ret);
2475 apply_func_caps(hr_dev);
2477 ret = hns_roce_alloc_vf_resource(hr_dev);
2479 dev_err(dev, "failed to alloc vf resource, ret = %d.\n", ret);
2483 ret = hns_roce_v2_set_bt(hr_dev);
2485 dev_err(dev, "failed to config BA table, ret = %d.\n", ret);
2489 /* Configure the size of QPC, SCCC, etc. */
2490 return hns_roce_config_entry_size(hr_dev);
2493 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
2495 struct device *dev = hr_dev->dev;
2498 ret = hns_roce_cmq_query_hw_info(hr_dev);
2500 dev_err(dev, "failed to query hardware info, ret = %d.\n", ret);
2504 ret = hns_roce_query_fw_ver(hr_dev);
2506 dev_err(dev, "failed to query firmware info, ret = %d.\n", ret);
2510 hr_dev->vendor_part_id = hr_dev->pci_dev->device;
2511 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
2514 return hns_roce_v2_vf_profile(hr_dev);
2516 return hns_roce_v2_pf_profile(hr_dev);
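/*
 * Fill the extended link-list (LLM) configuration table: one 64-bit entry
 * per data page, each combining the page address with a next-page pointer
 * through HNS_ROCE_EXT_LLM_ENTRY().
 */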
2519 static void config_llm_table(struct hns_roce_buf *data_buf, void *cfg_buf)
2521 u32 i, next_ptr, page_num;
2522 __le64 *entry = cfg_buf;
2526 page_num = data_buf->npages;
2527 for (i = 0; i < page_num; i++) {
2528 addr = hns_roce_buf_page(data_buf, i);
2529 if (i == (page_num - 1))
2534 val = HNS_ROCE_EXT_LLM_ENTRY(addr, (u64)next_ptr);
2535 entry[i] = cpu_to_le64(val);
2539 static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev,
2540 struct hns_roce_link_table *table)
2542 struct hns_roce_cmq_desc desc[2];
2543 struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
2544 struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
2545 struct hns_roce_buf *buf = table->buf;
2546 enum hns_roce_opcode_type opcode;
2549 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
2550 hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
2551 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2552 hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
2554 hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
2555 hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
2556 hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages);
2557 hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift));
2558 hr_reg_enable(r_a, CFG_LLM_A_INIT_EN);
2560 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, 0));
2561 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_L, lower_32_bits(addr));
2562 hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_H, upper_32_bits(addr));
2563 hr_reg_write(r_a, CFG_LLM_A_HEAD_NXTPTR, 1);
2564 hr_reg_write(r_a, CFG_LLM_A_HEAD_PTR, 0);
2566 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, buf->npages - 1));
2567 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_L, lower_32_bits(addr));
2568 hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_H, upper_32_bits(addr));
2569 hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1);
2571 return hns_roce_cmq_send(hr_dev, desc, 2);
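/*
 * Allocate the extended LLM buffers: a data buffer sized from num_qps (with
 * a lower bound derived from sl_num) and a coherent DMA "config" buffer that
 * holds one 64-bit link entry per data page.
 */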
2574 static struct hns_roce_link_table *
2575 alloc_link_table_buf(struct hns_roce_dev *hr_dev)
2577 struct hns_roce_v2_priv *priv = hr_dev->priv;
2578 struct hns_roce_link_table *link_tbl;
2579 u32 pg_shift, size, min_size;
2581 link_tbl = &priv->ext_llm;
2582 pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT;
2583 size = hr_dev->caps.num_qps * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
2584 min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(hr_dev->caps.sl_num) << pg_shift;
2586 /* Alloc data table */
2587 size = max(size, min_size);
2588 link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0);
2589 if (IS_ERR(link_tbl->buf))
2590 return ERR_PTR(-ENOMEM);
2592 /* Alloc config table */
2593 size = link_tbl->buf->npages * sizeof(u64);
2594 link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size,
2595 &link_tbl->table.map,
2597 if (!link_tbl->table.buf) {
2598 hns_roce_buf_free(hr_dev, link_tbl->buf);
2599 return ERR_PTR(-ENOMEM);
2605 static void free_link_table_buf(struct hns_roce_dev *hr_dev,
2606 struct hns_roce_link_table *tbl)
2609 u32 size = tbl->buf->npages * sizeof(u64);
2611 dma_free_coherent(hr_dev->dev, size, tbl->table.buf,
2615 hns_roce_buf_free(hr_dev, tbl->buf);
2618 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev)
2620 struct hns_roce_link_table *link_tbl;
2623 link_tbl = alloc_link_table_buf(hr_dev);
2624 if (IS_ERR(link_tbl))
2627 if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) {
2632 config_llm_table(link_tbl->buf, link_tbl->table.buf);
2633 ret = set_llm_cfg_to_hw(hr_dev, link_tbl);
2640 free_link_table_buf(hr_dev, link_tbl);
2644 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev)
2646 struct hns_roce_v2_priv *priv = hr_dev->priv;
2648 free_link_table_buf(hr_dev, &priv->ext_llm);
2651 static void free_dip_list(struct hns_roce_dev *hr_dev)
2653 struct hns_roce_dip *hr_dip;
2654 struct hns_roce_dip *tmp;
2655 unsigned long flags;
2657 spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
2659 list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) {
2660 list_del(&hr_dip->node);
2664 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
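/*
 * Pre-get the HEM chunks that the hardware needs for the whole lifetime of
 * the device: the GMV (source address) table and the QPC/CQC timer tables.
 * On failure, every chunk obtained so far is put back.
 */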
2667 static int get_hem_table(struct hns_roce_dev *hr_dev)
2669 unsigned int qpc_count;
2670 unsigned int cqc_count;
2671 unsigned int gmv_count;
2675 /* Alloc memory for source address table buffer space chunk */
2676 for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
2678 ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
2680 goto err_gmv_failed;
2686 /* Alloc memory for QPC Timer buffer space chunk */
2687 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
2689 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
2692 dev_err(hr_dev->dev, "QPC Timer get failed\n");
2693 goto err_qpc_timer_failed;
2697 /* Alloc memory for CQC Timer buffer space chunk */
2698 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
2700 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
2703 dev_err(hr_dev->dev, "CQC Timer get failed\n");
2704 goto err_cqc_timer_failed;
2710 err_cqc_timer_failed:
2711 for (i = 0; i < cqc_count; i++)
2712 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2714 err_qpc_timer_failed:
2715 for (i = 0; i < qpc_count; i++)
2716 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2719 for (i = 0; i < gmv_count; i++)
2720 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2725 static void put_hem_table(struct hns_roce_dev *hr_dev)
2729 for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
2730 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2735 for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
2736 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2738 for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
2739 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2742 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
2746 /* The hns ROCEE requires the extdb info to be cleared before use */
2747 ret = hns_roce_clear_extdb_list_info(hr_dev);
2751 ret = get_hem_table(hr_dev);
2758 ret = hns_roce_init_link_table(hr_dev);
2760 dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret);
2761 goto err_llm_init_failed;
2766 err_llm_init_failed:
2767 put_hem_table(hr_dev);
2772 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2774 hns_roce_function_clear(hr_dev);
2777 hns_roce_free_link_table(hr_dev);
2779 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
2780 free_dip_list(hr_dev);
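/*
 * Post a mailbox command by wrapping it into a HNS_ROCE_OPC_POST_MB CMQ
 * descriptor: in/out parameters, the tag built from in_modifier and op, and
 * the token/event enable word.
 */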
2783 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2784 u64 out_param, u32 in_modifier, u8 op_modifier,
2785 u16 op, u16 token, int event)
2787 struct hns_roce_cmq_desc desc;
2788 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2790 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2792 mb->in_param_l = cpu_to_le32(in_param);
2793 mb->in_param_h = cpu_to_le32(in_param >> 32);
2794 mb->out_param_l = cpu_to_le32(out_param);
2795 mb->out_param_h = cpu_to_le32(out_param >> 32);
2796 mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2797 mb->token_event_en = cpu_to_le32(event << 16 | token);
2799 return hns_roce_cmq_send(hr_dev, &desc, 1);
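/*
 * Poll the mailbox status via HNS_ROCE_OPC_QUERY_MB_ST until the HW_RUN bit
 * clears or the timeout expires, and report the completion status. If the
 * mailbox becomes unavailable (e.g. during reset), errors are ignored and a
 * completed status is returned.
 */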
2802 static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
2803 u8 *complete_status)
2805 struct hns_roce_mbox_status *mb_st;
2806 struct hns_roce_cmq_desc desc;
2812 mb_st = (struct hns_roce_mbox_status *)desc.data;
2813 end = msecs_to_jiffies(timeout) + jiffies;
2814 while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
2816 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
2818 ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
2820 status = le32_to_cpu(mb_st->mb_status_hw_run);
2821 /* No pending message exists in ROCEE mbox. */
2822 if (!(status & MB_ST_HW_RUN_M))
2824 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
2828 if (time_after(jiffies, end)) {
2829 dev_err_ratelimited(hr_dev->dev,
2830 "failed to wait mbox status 0x%x\n",
2840 *complete_status = (u8)(status & MB_ST_COMPLETE_M);
2841 } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
2842 /* Ignore all errors if the mbox is unavailable. */
2844 *complete_status = MB_ST_COMPLETE_M;
2850 static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2851 u64 out_param, u32 in_modifier, u8 op_modifier,
2852 u16 op, u16 token, int event)
2857 /* Wait for the mbox to become idle */
2858 ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
2860 if (unlikely(ret)) {
2861 dev_err_ratelimited(hr_dev->dev,
2862 "failed to check post mbox status = 0x%x, ret = %d.\n",
2867 /* Post new message to mbox */
2868 ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2869 op_modifier, op, token, event);
2871 dev_err_ratelimited(hr_dev->dev,
2872 "failed to post mailbox, ret = %d.\n", ret);
2877 static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev, unsigned int timeout)
2882 ret = v2_wait_mbox_complete(hr_dev, timeout, &status);
2884 if (status != MB_ST_COMPLETE_SUCC)
2887 dev_err_ratelimited(hr_dev->dev,
2888 "failed to check mbox status = 0x%x, ret = %d.\n",
2895 static void copy_gid(void *dest, const union ib_gid *gid)
2898 const union ib_gid *src = gid;
2899 __le32 (*p)[GID_SIZE] = dest;
2905 for (i = 0; i < GID_SIZE; i++)
2906 (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]);
2909 static int config_sgid_table(struct hns_roce_dev *hr_dev,
2910 int gid_index, const union ib_gid *gid,
2911 enum hns_roce_sgid_type sgid_type)
2913 struct hns_roce_cmq_desc desc;
2914 struct hns_roce_cfg_sgid_tb *sgid_tb =
2915 (struct hns_roce_cfg_sgid_tb *)desc.data;
2917 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2919 roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
2920 CFG_SGID_TB_TABLE_IDX_S, gid_index);
2921 roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
2922 CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2924 copy_gid(&sgid_tb->vf_sgid_l, gid);
2926 return hns_roce_cmq_send(hr_dev, &desc, 1);
2929 static int config_gmv_table(struct hns_roce_dev *hr_dev,
2930 int gid_index, const union ib_gid *gid,
2931 enum hns_roce_sgid_type sgid_type,
2932 const struct ib_gid_attr *attr)
2934 struct hns_roce_cmq_desc desc[2];
2935 struct hns_roce_cfg_gmv_tb_a *tb_a =
2936 (struct hns_roce_cfg_gmv_tb_a *)desc[0].data;
2937 struct hns_roce_cfg_gmv_tb_b *tb_b =
2938 (struct hns_roce_cfg_gmv_tb_b *)desc[1].data;
2940 u16 vlan_id = VLAN_CFI_MASK;
2941 u8 mac[ETH_ALEN] = {};
2945 ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac);
2950 hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false);
2951 desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2953 hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false);
2955 copy_gid(&tb_a->vf_sgid_l, gid);
2957 roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_SGID_TYPE_M,
2958 CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type);
2959 roce_set_bit(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_EN_S,
2960 vlan_id < VLAN_CFI_MASK);
2961 roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_ID_M,
2962 CFG_GMV_TB_VF_VLAN_ID_S, vlan_id);
2964 tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
2965 roce_set_field(tb_b->vf_smac_h, CFG_GMV_TB_SMAC_H_M,
2966 CFG_GMV_TB_SMAC_H_S, *(u16 *)&mac[4]);
2968 roce_set_field(tb_b->table_idx_rsv, CFG_GMV_TB_SGID_IDX_M,
2969 CFG_GMV_TB_SGID_IDX_S, gid_index);
2971 return hns_roce_cmq_send(hr_dev, desc, 2);
2974 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
2975 const union ib_gid *gid,
2976 const struct ib_gid_attr *attr)
2978 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2982 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2983 if (ipv6_addr_v4mapped((void *)gid))
2984 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2986 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2987 } else if (attr->gid_type == IB_GID_TYPE_ROCE) {
2988 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2992 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
2993 ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr);
2995 ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2998 ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n",
3004 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
3007 struct hns_roce_cmq_desc desc;
3008 struct hns_roce_cfg_smac_tb *smac_tb =
3009 (struct hns_roce_cfg_smac_tb *)desc.data;
3013 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
3015 reg_smac_l = *(u32 *)(&addr[0]);
3016 reg_smac_h = *(u16 *)(&addr[4]);
3018 roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
3019 CFG_SMAC_TB_IDX_S, phy_port);
3020 roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
3021 CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
3022 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
3024 return hns_roce_cmq_send(hr_dev, &desc, 1);
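/*
 * Look up the PBL of an MR and fill the MPT entry with the PBL base address
 * and the first page addresses; the base address is stored shifted right by
 * 3 bits, i.e. in 8-byte units.
 */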
3027 static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
3028 struct hns_roce_v2_mpt_entry *mpt_entry,
3029 struct hns_roce_mr *mr)
3031 u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
3032 struct ib_device *ibdev = &hr_dev->ib_dev;
3036 count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
3037 ARRAY_SIZE(pages), &pbl_ba);
3039 ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
3044 /* Aligned to the hardware address access unit */
3045 for (i = 0; i < count; i++)
3048 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3049 mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
3050 roce_set_field(mpt_entry->byte_48_mode_ba,
3051 V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
3052 upper_32_bits(pbl_ba >> 3));
3054 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
3055 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
3056 V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
3058 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
3059 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
3060 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
3061 roce_set_field(mpt_entry->byte_64_buf_pa1,
3062 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
3063 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
3064 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3069 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
3070 void *mb_buf, struct hns_roce_mr *mr)
3072 struct hns_roce_v2_mpt_entry *mpt_entry;
3076 memset(mpt_entry, 0, sizeof(*mpt_entry));
3078 hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
3079 hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3080 hr_reg_enable(mpt_entry, MPT_L_INV_EN);
3082 hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
3083 mr->access & IB_ACCESS_MW_BIND);
3084 hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
3085 mr->access & IB_ACCESS_REMOTE_ATOMIC);
3086 hr_reg_write_bool(mpt_entry, MPT_RR_EN,
3087 mr->access & IB_ACCESS_REMOTE_READ);
3088 hr_reg_write_bool(mpt_entry, MPT_RW_EN,
3089 mr->access & IB_ACCESS_REMOTE_WRITE);
3090 hr_reg_write_bool(mpt_entry, MPT_LW_EN,
3091 mr->access & IB_ACCESS_LOCAL_WRITE);
3093 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3094 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3095 mpt_entry->lkey = cpu_to_le32(mr->key);
3096 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3097 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3099 if (mr->type != MR_TYPE_MR)
3100 hr_reg_enable(mpt_entry, MPT_PA);
3102 if (mr->type == MR_TYPE_DMA)
3105 if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0)
3106 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);
3108 hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3109 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3110 hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);
3112 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
3117 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
3118 struct hns_roce_mr *mr, int flags,
3121 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
3122 u32 mr_access_flags = mr->access;
3125 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
3126 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
3128 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
3129 V2_MPT_BYTE_4_PD_S, mr->pd);
3131 if (flags & IB_MR_REREG_ACCESS) {
3132 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
3133 V2_MPT_BYTE_8_BIND_EN_S,
3134 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
3135 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
3136 V2_MPT_BYTE_8_ATOMIC_EN_S,
3137 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
3138 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
3139 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
3140 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
3141 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
3142 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
3143 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
3146 if (flags & IB_MR_REREG_TRANS) {
3147 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3148 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3149 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3150 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3152 ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
3158 static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
3159 void *mb_buf, struct hns_roce_mr *mr)
3161 struct ib_device *ibdev = &hr_dev->ib_dev;
3162 struct hns_roce_v2_mpt_entry *mpt_entry;
3163 dma_addr_t pbl_ba = 0;
3166 memset(mpt_entry, 0, sizeof(*mpt_entry));
3168 if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
3169 ibdev_err(ibdev, "failed to find frmr mtr.\n");
3173 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
3174 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
3175 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
3176 V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
3177 roce_set_field(mpt_entry->byte_4_pd_hop_st,
3178 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
3179 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
3180 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3181 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
3182 V2_MPT_BYTE_4_PD_S, mr->pd);
3184 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
3185 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
3186 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
3188 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
3189 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
3190 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
3191 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
3193 mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3195 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
3196 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
3197 V2_MPT_BYTE_48_PBL_BA_H_S,
3198 upper_32_bits(pbl_ba >> 3));
3200 roce_set_field(mpt_entry->byte_64_buf_pa1,
3201 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
3202 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
3203 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3208 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
3210 struct hns_roce_v2_mpt_entry *mpt_entry;
3213 memset(mpt_entry, 0, sizeof(*mpt_entry));
3215 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
3216 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
3217 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
3218 V2_MPT_BYTE_4_PD_S, mw->pdn);
3219 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
3220 V2_MPT_BYTE_4_PBL_HOP_NUM_S,
3221 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
3223 roce_set_field(mpt_entry->byte_4_pd_hop_st,
3224 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
3225 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
3226 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
3228 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
3229 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
3230 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
3232 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
3233 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
3234 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
3235 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
3236 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
3238 roce_set_field(mpt_entry->byte_64_buf_pa1,
3239 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
3240 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
3241 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
3243 mpt_entry->lkey = cpu_to_le32(mw->rkey);
3248 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
3250 return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
3253 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
3255 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
3257 /* A CQE is valid only when its owner bit is the inverse of the MSB of cons_idx */
3258 return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :
3262 static inline void update_cq_db(struct hns_roce_dev *hr_dev,
3263 struct hns_roce_cq *hr_cq)
3265 if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
3266 *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
3268 struct hns_roce_v2_db cq_db = {};
3270 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
3271 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB);
3272 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
3273 hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1);
3275 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
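/*
 * Purge all CQEs that belong to the given QP from the CQ: free any SRQ WQEs
 * they reference, compact the remaining CQEs over the freed slots and
 * advance the consumer index. The caller must hold the CQ lock; the locked
 * wrapper is hns_roce_v2_cq_clean().
 */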
3279 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3280 struct hns_roce_srq *srq)
3282 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3283 struct hns_roce_v2_cqe *cqe, *dest;
3289 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
3291 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
3296 * Now walk backwards through the CQ, removing CQ entries
3297 * that match our QP by overwriting them with later entries.
3299 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
3300 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
3301 if (hr_reg_read(cqe, CQE_LCL_QPN) == qpn) {
3302 if (srq && hr_reg_read(cqe, CQE_S_R)) {
3303 wqe_index = hr_reg_read(cqe, CQE_WQE_IDX);
3304 hns_roce_free_srq_wqe(srq, wqe_index);
3307 } else if (nfreed) {
3308 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
3310 owner_bit = hr_reg_read(dest, CQE_OWNER);
3311 memcpy(dest, cqe, hr_cq->cqe_size);
3312 hr_reg_write(dest, CQE_OWNER, owner_bit);
3317 hr_cq->cons_index += nfreed;
3318 update_cq_db(hr_dev, hr_cq);
3322 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3323 struct hns_roce_srq *srq)
3325 spin_lock_irq(&hr_cq->lock);
3326 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
3327 spin_unlock_irq(&hr_cq->lock);
3330 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
3331 struct hns_roce_cq *hr_cq, void *mb_buf,
3332 u64 *mtts, dma_addr_t dma_handle)
3334 struct hns_roce_v2_cq_context *cq_context;
3336 cq_context = mb_buf;
3337 memset(cq_context, 0, sizeof(*cq_context));
3339 hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
3340 hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED);
3341 hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
3342 hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
3343 hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);
3345 if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE)
3346 hr_reg_write(cq_context, CQC_CQE_SIZE, CQE_SIZE_64B);
3348 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
3349 hr_reg_enable(cq_context, CQC_STASH);
3351 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_L,
3352 to_hr_hw_page_addr(mtts[0]));
3353 hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_H,
3354 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
3355 hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num ==
3356 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
3357 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_L,
3358 to_hr_hw_page_addr(mtts[1]));
3359 hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H,
3360 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
3361 hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ,
3362 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
3363 hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
3364 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
3365 hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> 3);
3366 hr_reg_write(cq_context, CQC_CQE_BA_H, (dma_handle >> (32 + 3)));
3367 hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
3368 hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB);
3369 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L,
3370 ((u32)hr_cq->db.dma) >> 1);
3371 hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H,
3372 hr_cq->db.dma >> 32);
3373 hr_reg_write(cq_context, CQC_CQ_MAX_CNT,
3374 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
3375 hr_reg_write(cq_context, CQC_CQ_PERIOD,
3376 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
3379 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
3380 enum ib_cq_notify_flags flags)
3382 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3383 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3384 struct hns_roce_v2_db cq_db = {};
3388 * flags = 0, then notify_flag : next
3389 * flags = 1, then notify_flag : solicited
3391 notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
3392 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
3394 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
3395 hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB_NOTIFY);
3396 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
3397 hr_reg_write(&cq_db, DB_CQ_CMD_SN, hr_cq->arm_sn);
3398 hr_reg_write(&cq_db, DB_CQ_NOTIFY, notify_flag);
3400 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3405 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
3406 struct hns_roce_qp *qp,
3409 struct hns_roce_rinl_sge *sge_list;
3410 u32 wr_num, wr_cnt, sge_num;
3411 u32 sge_cnt, data_len, size;
3414 wr_num = hr_reg_read(cqe, CQE_WQE_IDX);
3415 wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
3417 sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
3418 sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
3419 wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
3420 data_len = wc->byte_len;
3422 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
3423 size = min(sge_list[sge_cnt].len, data_len);
3424 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
3430 if (unlikely(data_len)) {
3431 wc->status = IB_WC_LOC_LEN_ERR;
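/*
 * Generate software completions with IB_WC_WR_FLUSH_ERR status for the WQEs
 * still outstanding on a work queue; used when the CQ has to be polled in
 * software because the hardware is no longer available.
 */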
3438 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
3439 int num_entries, struct ib_wc *wc)
3444 left = wq->head - wq->tail;
3448 left = min_t(unsigned int, (unsigned int)num_entries, left);
3449 while (npolled < left) {
3450 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3451 wc->status = IB_WC_WR_FLUSH_ERR;
3453 wc->qp = &hr_qp->ibqp;
3463 static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
3466 struct hns_roce_qp *hr_qp;
3469 list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
3470 npolled += sw_comp(hr_qp, &hr_qp->sq,
3471 num_entries - npolled, wc + npolled);
3472 if (npolled >= num_entries)
3476 list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
3477 npolled += sw_comp(hr_qp, &hr_qp->rq,
3478 num_entries - npolled, wc + npolled);
3479 if (npolled >= num_entries)
3487 static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
3488 struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
3491 static const struct {
3493 enum ib_wc_status wc_status;
3495 { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
3496 { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
3497 { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
3498 { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
3499 { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
3500 { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
3501 { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
3502 { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
3503 { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
3504 { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
3505 { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
3506 { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
3507 IB_WC_RETRY_EXC_ERR },
3508 { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
3509 { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
3510 { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
3513 u32 cqe_status = hr_reg_read(cqe, CQE_STATUS);
3516 wc->status = IB_WC_GENERAL_ERR;
3517 for (i = 0; i < ARRAY_SIZE(map); i++)
3518 if (cqe_status == map[i].cqe_status) {
3519 wc->status = map[i].wc_status;
3523 if (likely(wc->status == IB_WC_SUCCESS ||
3524 wc->status == IB_WC_WR_FLUSH_ERR))
3527 ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
3528 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
3529 cq->cqe_size, false);
3530 wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
3533 * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
3534 * the standard protocol; the driver must ignore it and need not move
3535 * the QP to an error state.
3537 if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
3540 flush_cqe(hr_dev, qp);
3543 static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
3544 struct hns_roce_qp **cur_qp)
3546 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3547 struct hns_roce_qp *hr_qp = *cur_qp;
3550 qpn = hr_reg_read(cqe, CQE_LCL_QPN);
3552 if (!hr_qp || qpn != hr_qp->qpn) {
3553 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3554 if (unlikely(!hr_qp)) {
3555 ibdev_err(&hr_dev->ib_dev,
3556 "CQ %06lx with entry for unknown QPN %06x\n",
3567 * mapped-value = 1 + real-value
3568 * The real IB WC opcode values start from 0. In order to distinguish
3569 * between initialized and uninitialized map values, we add 1 to the actual
3570 * value when defining the mapping, so that validity can be identified by
3571 * checking whether the mapped value is greater than 0.
3573 #define HR_WC_OP_MAP(hr_key, ib_key) \
3574 [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key
3576 static const u32 wc_send_op_map[] = {
3577 HR_WC_OP_MAP(SEND, SEND),
3578 HR_WC_OP_MAP(SEND_WITH_INV, SEND),
3579 HR_WC_OP_MAP(SEND_WITH_IMM, SEND),
3580 HR_WC_OP_MAP(RDMA_READ, RDMA_READ),
3581 HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE),
3582 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE),
3583 HR_WC_OP_MAP(LOCAL_INV, LOCAL_INV),
3584 HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP),
3585 HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD),
3586 HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
3587 HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
3588 HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
3589 HR_WC_OP_MAP(BIND_MW, REG_MR),
3592 static int to_ib_wc_send_op(u32 hr_opcode)
3594 if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
3597 return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 :
3601 static const u32 wc_recv_op_map[] = {
3602 HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, WITH_IMM),
3603 HR_WC_OP_MAP(SEND, RECV),
3604 HR_WC_OP_MAP(SEND_WITH_IMM, WITH_IMM),
3605 HR_WC_OP_MAP(SEND_WITH_INV, RECV),
3608 static int to_ib_wc_recv_op(u32 hr_opcode)
3610 if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map))
3613 return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 :
3617 static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3624 hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
3625 switch (hr_opcode) {
3626 case HNS_ROCE_V2_WQE_OP_RDMA_READ:
3627 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3629 case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
3630 case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
3631 wc->wc_flags |= IB_WC_WITH_IMM;
3633 case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
3634 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3636 case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
3637 case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
3638 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
3639 case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
3646 ib_opcode = to_ib_wc_send_op(hr_opcode);
3648 wc->status = IB_WC_GENERAL_ERR;
3650 wc->opcode = ib_opcode;
3653 static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
3654 struct hns_roce_v2_cqe *cqe)
3656 return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI &&
3657 (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
3658 hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
3659 hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
3660 hr_reg_read(cqe, CQE_RQ_INLINE);
3663 static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3665 struct hns_roce_qp *qp = to_hr_qp(wc->qp);
3670 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3672 hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
3673 switch (hr_opcode) {
3674 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
3675 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
3676 wc->wc_flags = IB_WC_WITH_IMM;
3677 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata));
3679 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
3680 wc->wc_flags = IB_WC_WITH_INVALIDATE;
3681 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
3687 ib_opcode = to_ib_wc_recv_op(hr_opcode);
3689 wc->status = IB_WC_GENERAL_ERR;
3691 wc->opcode = ib_opcode;
3693 if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
3694 ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
3699 wc->sl = hr_reg_read(cqe, CQE_SL);
3700 wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
3702 wc->wc_flags |= hr_reg_read(cqe, CQE_GRH) ? IB_WC_GRH : 0;
3703 wc->port_num = hr_reg_read(cqe, CQE_PORTN);
3706 if (hr_reg_read(cqe, CQE_VID_VLD)) {
3707 wc->vlan_id = hr_reg_read(cqe, CQE_VID);
3708 wc->wc_flags |= IB_WC_WITH_VLAN;
3710 wc->vlan_id = 0xffff;
3713 wc->network_hdr_type = hr_reg_read(cqe, CQE_PORT_TYPE);
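/*
 * Poll one CQE: advance the consumer index, resolve the owning QP, retire
 * the matching SQ/RQ/SRQ WQE to recover the wr_id, and translate the CQE
 * into an ib_wc via fill_send_wc()/fill_recv_wc() and get_cqe_status().
 */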
3718 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
3719 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
3721 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3722 struct hns_roce_qp *qp = *cur_qp;
3723 struct hns_roce_srq *srq = NULL;
3724 struct hns_roce_v2_cqe *cqe;
3725 struct hns_roce_wq *wq;
3730 cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
3734 ++hr_cq->cons_index;
3735 /* Memory barrier */
3738 ret = get_cur_qp(hr_cq, cqe, &qp);
3745 wqe_idx = hr_reg_read(cqe, CQE_WQE_IDX);
3747 is_send = !hr_reg_read(cqe, CQE_S_R);
3751 /* If sq_signal_bits is set, the tail pointer will be updated to
3752 * the WQE corresponding to the current CQE.
3754 if (qp->sq_signal_bits)
3755 wq->tail += (wqe_idx - (u16)wq->tail) &
3758 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3761 fill_send_wc(wc, cqe);
3764 srq = to_hr_srq(qp->ibqp.srq);
3765 wc->wr_id = srq->wrid[wqe_idx];
3766 hns_roce_free_srq_wqe(srq, wqe_idx);
3769 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3773 ret = fill_recv_wc(wc, cqe);
3776 get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
3777 if (unlikely(wc->status != IB_WC_SUCCESS))
3783 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3786 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3787 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3788 struct hns_roce_qp *cur_qp = NULL;
3789 unsigned long flags;
3792 spin_lock_irqsave(&hr_cq->lock, flags);
3795 * When the device starts to reset, the state is RST_DOWN. At this time,
3796 * there may still be some valid CQEs in the hardware that have not been
3797 * polled. Therefore, it is not allowed to switch to software mode
3798 * immediately. Once the state changes to UNINIT, no CQEs remain in the
3799 * hardware, and it is safe to switch to software mode.
3801 if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
3802 npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
3806 for (npolled = 0; npolled < num_entries; ++npolled) {
3807 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
3812 update_cq_db(hr_dev, hr_cq);
3815 spin_unlock_irqrestore(&hr_cq->lock, flags);
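/*
 * Map a HEM table type plus BT step index onto the matching WRITE_xxx_BTn
 * mailbox opcode; table types that are not programmed through the mailbox
 * are rejected with a warning.
 */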
3820 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
3821 int step_idx, u16 *mbox_op)
3827 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3830 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3833 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3836 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3839 op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3841 case HEM_TYPE_QPC_TIMER:
3842 op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3844 case HEM_TYPE_CQC_TIMER:
3845 op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3848 dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type);
3852 *mbox_op = op + step_idx;
3857 static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
3858 dma_addr_t base_addr)
3860 struct hns_roce_cmq_desc desc;
3861 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
3862 u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz);
3863 u64 addr = to_hr_hw_page_addr(base_addr);
3865 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
3867 hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr));
3868 hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr));
3869 hr_reg_write(req, CFG_GMV_BT_IDX, idx);
3871 return hns_roce_cmq_send(hr_dev, &desc, 1);
3874 static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
3875 dma_addr_t base_addr, u32 hem_type, int step_idx)
3880 if (unlikely(hem_type == HEM_TYPE_GMV))
3881 return config_gmv_ba_to_hw(hr_dev, obj, base_addr);
3883 if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
3886 ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &op);
3890 return config_hem_ba_to_hw(hr_dev, obj, base_addr, op);
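/*
 * For multi-hop addressed tables, work out which BT/buffer chunk the object
 * lives in and program the base address of every addressing level into the
 * hardware through set_hem_to_hw().
 */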
3893 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
3894 struct hns_roce_hem_table *table, int obj,
3897 struct hns_roce_hem_iter iter;
3898 struct hns_roce_hem_mhop mhop;
3899 struct hns_roce_hem *hem;
3900 unsigned long mhop_obj = obj;
3909 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3912 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
3916 hop_num = mhop.hop_num;
3917 chunk_ba_num = mhop.bt_chunk_size / 8;
3920 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
3922 l1_idx = i * chunk_ba_num + j;
3923 } else if (hop_num == 1) {
3924 hem_idx = i * chunk_ba_num + j;
3925 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
3929 if (table->type == HEM_TYPE_SCCC)
3932 if (check_whether_last_step(hop_num, step_idx)) {
3933 hem = table->hem[hem_idx];
3934 for (hns_roce_hem_first(hem, &iter);
3935 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
3936 bt_ba = hns_roce_hem_addr(&iter);
3937 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
3942 bt_ba = table->bt_l0_dma_addr[i];
3943 else if (step_idx == 1 && hop_num == 2)
3944 bt_ba = table->bt_l1_dma_addr[l1_idx];
3946 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
3952 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3953 struct hns_roce_hem_table *table, int obj,
3956 struct device *dev = hr_dev->dev;
3957 struct hns_roce_cmd_mailbox *mailbox;
3961 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3964 switch (table->type) {
3966 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3969 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3972 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3975 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3978 case HEM_TYPE_QPC_TIMER:
3979 case HEM_TYPE_CQC_TIMER:
3983 dev_warn(dev, "table %u not to be destroyed by mailbox!\n",
3990 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3991 if (IS_ERR(mailbox))
3992 return PTR_ERR(mailbox);
3994 /* configure the tag and op */
3995 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3996 HNS_ROCE_CMD_TIMEOUT_MSECS);
3998 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4002 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
4003 struct hns_roce_v2_qp_context *context,
4004 struct hns_roce_v2_qp_context *qpc_mask,
4005 struct hns_roce_qp *hr_qp)
4007 struct hns_roce_cmd_mailbox *mailbox;
4011 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4012 if (IS_ERR(mailbox))
4013 return PTR_ERR(mailbox);
4015 /* The QPC size of HIP08 is only 256B, which is half that of HIP09 */
4016 qpc_size = hr_dev->caps.qpc_sz;
4017 memcpy(mailbox->buf, context, qpc_size);
4018 memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
4020 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
4021 HNS_ROCE_CMD_MODIFY_QPC,
4022 HNS_ROCE_CMD_TIMEOUT_MSECS);
4024 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
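/*
 * Update the remote-access bits (RRE/RWE/ATE) of the QPC from the QP
 * attributes; when the destination RD atomic depth is zero, only remote
 * write remains permitted.
 */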
4029 static void set_access_flags(struct hns_roce_qp *hr_qp,
4030 struct hns_roce_v2_qp_context *context,
4031 struct hns_roce_v2_qp_context *qpc_mask,
4032 const struct ib_qp_attr *attr, int attr_mask)
4037 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
4038 attr->max_dest_rd_atomic : hr_qp->resp_depth;
4040 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
4041 attr->qp_access_flags : hr_qp->atomic_rd_en;
4043 if (!dest_rd_atomic)
4044 access_flags &= IB_ACCESS_REMOTE_WRITE;
4046 hr_reg_write_bool(context, QPC_RRE,
4047 access_flags & IB_ACCESS_REMOTE_READ);
4048 hr_reg_clear(qpc_mask, QPC_RRE);
4050 hr_reg_write_bool(context, QPC_RWE,
4051 access_flags & IB_ACCESS_REMOTE_WRITE);
4052 hr_reg_clear(qpc_mask, QPC_RWE);
4054 hr_reg_write_bool(context, QPC_ATE,
4055 access_flags & IB_ACCESS_REMOTE_ATOMIC);
4056 hr_reg_clear(qpc_mask, QPC_ATE);
4057 hr_reg_write_bool(context, QPC_EXT_ATE,
4058 access_flags & IB_ACCESS_REMOTE_ATOMIC);
4059 hr_reg_clear(qpc_mask, QPC_EXT_ATE);
4062 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
4063 struct hns_roce_v2_qp_context *context,
4064 struct hns_roce_v2_qp_context *qpc_mask)
4066 hr_reg_write(context, QPC_SGE_SHIFT,
4067 to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
4068 hr_qp->sge.sge_shift));
4070 hr_reg_write(context, QPC_SQ_SHIFT, ilog2(hr_qp->sq.wqe_cnt));
4072 hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt));
4075 static inline int get_cqn(struct ib_cq *ib_cq)
4077 return ib_cq ? to_hr_cq(ib_cq)->cqn : 0;
4080 static inline int get_pdn(struct ib_pd *ib_pd)
4082 return ib_pd ? to_hr_pd(ib_pd)->pdn : 0;
4085 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
4086 const struct ib_qp_attr *attr,
4088 struct hns_roce_v2_qp_context *context,
4089 struct hns_roce_v2_qp_context *qpc_mask)
4091 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4092 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4095 * In the v2 engine, software passes the context and the context mask to
4096 * hardware when modifying a QP. For every field that software needs to
4097 * modify, all bits of that field in the context mask must be cleared to
4098 * 0 at the same time; the bits of unmodified fields are left set to 1.
4100 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
4102 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
4104 hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
4106 set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
4108 /* If there is no VLAN, the VLAN ID must be set to 0xFFF */
4109 hr_reg_write(context, QPC_VLAN_ID, 0xfff);
4111 if (ibqp->qp_type == IB_QPT_XRC_TGT) {
4112 context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);
4114 hr_reg_enable(context, QPC_XRC_QP_TYPE);
4117 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
4118 hr_reg_enable(context, QPC_RQ_RECORD_EN);
4120 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
4121 hr_reg_enable(context, QPC_OWNER_MODE);
4123 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L,
4124 lower_32_bits(hr_qp->rdb.dma) >> 1);
4125 hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
4126 upper_32_bits(hr_qp->rdb.dma));
4128 if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
4129 hr_reg_write_bool(context, QPC_RQIE,
4130 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
4132 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
4135 hr_reg_enable(context, QPC_SRQ_EN);
4136 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
4139 hr_reg_enable(context, QPC_FRE);
4141 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
4143 if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
4146 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
4147 hr_reg_enable(&context->ext, QPCEX_STASH);
4150 static void modify_qp_init_to_init(struct ib_qp *ibqp,
4151 const struct ib_qp_attr *attr, int attr_mask,
4152 struct hns_roce_v2_qp_context *context,
4153 struct hns_roce_v2_qp_context *qpc_mask)
4156 * In the v2 engine, software passes the context and the context mask to
4157 * hardware when modifying a QP. For each field that software wants to
4158 * modify, all bits of that field in the context mask must be cleared to 0;
4159 * fields that must not change keep their mask bits set to 1.
4161 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
4162 hr_reg_clear(qpc_mask, QPC_TST);
4164 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
4165 hr_reg_clear(qpc_mask, QPC_PD);
4167 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
4168 hr_reg_clear(qpc_mask, QPC_RX_CQN);
4170 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
4171 hr_reg_clear(qpc_mask, QPC_TX_CQN);
4174 hr_reg_enable(context, QPC_SRQ_EN);
4175 hr_reg_clear(qpc_mask, QPC_SRQ_EN);
4176 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
4177 hr_reg_clear(qpc_mask, QPC_SRQN);
4181 static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
4182 struct hns_roce_qp *hr_qp,
4183 struct hns_roce_v2_qp_context *context,
4184 struct hns_roce_v2_qp_context *qpc_mask)
4186 u64 mtts[MTT_MIN_COUNT] = { 0 };
4190 /* Search qp buf's mtts */
4191 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
4192 MTT_MIN_COUNT, &wqe_sge_ba);
4193 if (hr_qp->rq.wqe_cnt && count < 1) {
4194 ibdev_err(&hr_dev->ib_dev,
4195 "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
4199 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
4200 qpc_mask->wqe_sge_ba = 0;
4203 * In the v2 engine, software passes the context and the context mask to
4204 * hardware when modifying a QP. For each field that software wants to
4205 * modify, all bits of that field in the context mask must be cleared to 0;
4206 * fields that must not change keep their mask bits set to 1.
4208 hr_reg_write(context, QPC_WQE_SGE_BA_H, wqe_sge_ba >> (32 + 3));
4209 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_H);
4211 hr_reg_write(context, QPC_SQ_HOP_NUM,
4212 to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
4213 hr_qp->sq.wqe_cnt));
4214 hr_reg_clear(qpc_mask, QPC_SQ_HOP_NUM);
4216 hr_reg_write(context, QPC_SGE_HOP_NUM,
4217 to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
4218 hr_qp->sge.sge_cnt));
4219 hr_reg_clear(qpc_mask, QPC_SGE_HOP_NUM);
4221 hr_reg_write(context, QPC_RQ_HOP_NUM,
4222 to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
4223 hr_qp->rq.wqe_cnt));
4225 hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM);
4227 hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ,
4228 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
4229 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ);
4231 hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ,
4232 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
4233 hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ);
4235 context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
4236 qpc_mask->rq_cur_blk_addr = 0;
4238 hr_reg_write(context, QPC_RQ_CUR_BLK_ADDR_H,
4239 upper_32_bits(to_hr_hw_page_addr(mtts[0])));
4240 hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
4242 context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
4243 qpc_mask->rq_nxt_blk_addr = 0;
4245 hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
4246 upper_32_bits(to_hr_hw_page_addr(mtts[1])));
4247 hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
4252 static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
4253 struct hns_roce_qp *hr_qp,
4254 struct hns_roce_v2_qp_context *context,
4255 struct hns_roce_v2_qp_context *qpc_mask)
4257 struct ib_device *ibdev = &hr_dev->ib_dev;
4258 u64 sge_cur_blk = 0;
4262 /* search qp buf's mtts */
4263 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
4265 ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
4269 if (hr_qp->sge.sge_cnt > 0) {
4270 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
4272 &sge_cur_blk, 1, NULL);
4274 ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
4281 * In the v2 engine, software passes the context and the context mask to
4282 * hardware when modifying a QP. For each field that software wants to
4283 * modify, all bits of that field in the context mask must be cleared to 0;
4284 * fields that must not change keep their mask bits set to 1.
4286 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_L,
4287 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4288 hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_H,
4289 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4290 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_L);
4291 hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_H);
4293 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_L,
4294 lower_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4295 hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_H,
4296 upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4297 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_L);
4298 hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_H);
4300 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_L,
4301 lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4302 hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_H,
4303 upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4304 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_L);
4305 hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_H);
4310 static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
4311 const struct ib_qp_attr *attr)
4313 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
4316 return attr->path_mtu;
4319 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
4320 const struct ib_qp_attr *attr, int attr_mask,
4321 struct hns_roce_v2_qp_context *context,
4322 struct hns_roce_v2_qp_context *qpc_mask)
4324 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4325 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4326 struct ib_device *ibdev = &hr_dev->ib_dev;
4338 ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
4340 ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
4344 /* Search IRRL's mtts */
4345 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
4346 hr_qp->qpn, &irrl_ba);
4348 ibdev_err(ibdev, "failed to find qp irrl_table.\n");
4352 /* Search TRRL's mtts */
4353 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
4354 hr_qp->qpn, &trrl_ba);
4356 ibdev_err(ibdev, "failed to find qp trrl_table.\n");
4360 if (attr_mask & IB_QP_ALT_PATH) {
4361 ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
4366 hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> 4);
4367 hr_reg_clear(qpc_mask, QPC_TRRL_BA_L);
4368 context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
4369 qpc_mask->trrl_ba = 0;
4370 hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> (32 + 16 + 4));
4371 hr_reg_clear(qpc_mask, QPC_TRRL_BA_H);
4373 context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
4374 qpc_mask->irrl_ba = 0;
4375 hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> (32 + 6));
4376 hr_reg_clear(qpc_mask, QPC_IRRL_BA_H);
4378 hr_reg_enable(context, QPC_RMT_E2E);
4379 hr_reg_clear(qpc_mask, QPC_RMT_E2E);
4381 hr_reg_write(context, QPC_SIG_TYPE, hr_qp->sq_signal_bits);
4382 hr_reg_clear(qpc_mask, QPC_SIG_TYPE);
4384 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
4386 smac = (const u8 *)hr_dev->dev_addr[port];
4387 dmac = (u8 *)attr->ah_attr.roce.dmac;
4388 /* When the dmac equals the smac or loop_idc is 1, loopback should be used */
4389 if (ether_addr_equal_unaligned(dmac, smac) ||
4390 hr_dev->loop_idc == 0x1) {
4391 hr_reg_write(context, QPC_LBI, hr_dev->loop_idc);
4392 hr_reg_clear(qpc_mask, QPC_LBI);
4395 if (attr_mask & IB_QP_DEST_QPN) {
4396 hr_reg_write(context, QPC_DQPN, attr->dest_qp_num);
4397 hr_reg_clear(qpc_mask, QPC_DQPN);
4400 memcpy(&(context->dmac), dmac, sizeof(u32));
4401 hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
4403 hr_reg_clear(qpc_mask, QPC_DMAC_H);
4405 ib_mtu = get_mtu(ibqp, attr);
4406 hr_qp->path_mtu = ib_mtu;
4408 mtu = ib_mtu_enum_to_int(ib_mtu);
4409 if (WARN_ON(mtu <= 0))
4411 #define MAX_LP_MSG_LEN 16384
4412 /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */
4413 lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
4414 if (WARN_ON(lp_pktn_ini >= 0xF))
4417 if (attr_mask & IB_QP_PATH_MTU) {
4418 hr_reg_write(context, QPC_MTU, ib_mtu);
4419 hr_reg_clear(qpc_mask, QPC_MTU);
4422 hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
4423 hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
4425 /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
4426 hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini);
4427 hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
4429 hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
4430 hr_reg_clear(qpc_mask, QPC_RX_REQ_MSN);
4431 hr_reg_clear(qpc_mask, QPC_RX_REQ_LAST_OPTYPE);
4433 context->rq_rnr_timer = 0;
4434 qpc_mask->rq_rnr_timer = 0;
4436 hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX);
4437 hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX);
4439 /* RoCEE sends 2^lp_sgen_ini segments at a time */
4440 hr_reg_write(context, QPC_LP_SGEN_INI, 3);
4441 hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
4446 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
4447 const struct ib_qp_attr *attr, int attr_mask,
4448 struct hns_roce_v2_qp_context *context,
4449 struct hns_roce_v2_qp_context *qpc_mask)
4451 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4452 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4453 struct ib_device *ibdev = &hr_dev->ib_dev;
4456 /* Alternate path and path migration are not supported */
4457 if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
4458 ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x) error.\n", attr_mask);
4462 ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
4464 ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
4469 * Clear some fields via the mask only: since the default values of all
4470 * fields in the context are already zero, they need not be written again,
4471 * but the relevant fields in the context mask must be cleared.
4473 hr_reg_clear(qpc_mask, QPC_IRRL_SGE_IDX);
4475 hr_reg_clear(qpc_mask, QPC_RX_ACK_MSN);
4477 hr_reg_clear(qpc_mask, QPC_ACK_LAST_OPTYPE);
4478 hr_reg_clear(qpc_mask, QPC_IRRL_PSN_VLD);
4479 hr_reg_clear(qpc_mask, QPC_IRRL_PSN);
4481 hr_reg_clear(qpc_mask, QPC_IRRL_TAIL_REAL);
4483 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_MSN);
4485 hr_reg_clear(qpc_mask, QPC_RNR_RETRY_FLAG);
4487 hr_reg_clear(qpc_mask, QPC_CHECK_FLG);
4489 hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD);
4494 static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4497 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4498 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4499 u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx;
4500 u32 *head = &hr_dev->qp_table.idx_table.head;
4501 u32 *tail = &hr_dev->qp_table.idx_table.tail;
4502 struct hns_roce_dip *hr_dip;
4503 unsigned long flags;
4506 spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
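/* Push this QP number onto the tail of the spare index ring; entries are
 * later handed out from the head as free DIP context indexes.
 */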
4508 spare_idx[*tail] = ibqp->qp_num;
4509 *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
4511 list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
4512 if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
4513 *dip_idx = hr_dip->dip_idx;
4518 /* If no dgid is found, a new dip and a mapping between dgid and
4519 * dip_idx will be created.
4521 hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC);
4527 memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4528 hr_dip->dip_idx = *dip_idx = spare_idx[*head];
4529 *head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1);
4530 list_add_tail(&hr_dip->node, &hr_dev->dip_list);
4533 spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
4543 UNSUPPORT_CONG_LEVEL,
4562 static int check_cong_type(struct ib_qp *ibqp,
4563 struct hns_roce_congestion_algorithm *cong_alg)
4565 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4567 /* different congestion types match different configurations */
4568 switch (hr_dev->caps.cong_type) {
4569 case CONG_TYPE_DCQCN:
4570 cong_alg->alg_sel = CONG_DCQCN;
4571 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4572 cong_alg->dip_vld = DIP_INVALID;
4573 cong_alg->wnd_mode_sel = WND_LIMIT;
4575 case CONG_TYPE_LDCP:
4576 cong_alg->alg_sel = CONG_WINDOW;
4577 cong_alg->alg_sub_sel = CONG_LDCP;
4578 cong_alg->dip_vld = DIP_INVALID;
4579 cong_alg->wnd_mode_sel = WND_UNLIMIT;
4582 cong_alg->alg_sel = CONG_WINDOW;
4583 cong_alg->alg_sub_sel = CONG_HC3;
4584 cong_alg->dip_vld = DIP_INVALID;
4585 cong_alg->wnd_mode_sel = WND_LIMIT;
4588 cong_alg->alg_sel = CONG_DCQCN;
4589 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4590 cong_alg->dip_vld = DIP_VALID;
4591 cong_alg->wnd_mode_sel = WND_LIMIT;
4594 ibdev_err(&hr_dev->ib_dev,
4595 "error type(%u) for congestion selection.\n",
4596 hr_dev->caps.cong_type);
4603 static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4604 struct hns_roce_v2_qp_context *context,
4605 struct hns_roce_v2_qp_context *qpc_mask)
4607 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4608 struct hns_roce_congestion_algorithm cong_field;
4609 struct ib_device *ibdev = ibqp->device;
4610 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
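/* Congestion control is skipped on HIP08 and for RoCE v1 GIDs; the
 * remaining cases program the congestion algorithm fields below.
 */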
4614 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ||
4615 grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE)
4618 ret = check_cong_type(ibqp, &cong_field);
4622 hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
4623 hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
4624 hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
4625 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
4626 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
4627 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
4628 cong_field.alg_sub_sel);
4629 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL);
4630 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
4631 hr_reg_clear(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD);
4632 hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN,
4633 cong_field.wnd_mode_sel);
4634 hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN);
4636 /* if dip is disabled, there is no need to set dip idx */
4637 if (cong_field.dip_vld == 0)
4640 ret = get_dip_ctx_idx(ibqp, attr, &dip_idx);
4642 ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret);
4646 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx);
4647 hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0);
4652 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4653 const struct ib_qp_attr *attr,
4655 struct hns_roce_v2_qp_context *context,
4656 struct hns_roce_v2_qp_context *qpc_mask)
4658 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4659 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4660 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4661 struct ib_device *ibdev = &hr_dev->ib_dev;
4662 const struct ib_gid_attr *gid_attr = NULL;
4663 int is_roce_protocol;
4664 u16 vlan_id = 0xffff;
4665 bool is_udp = false;
4670 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4671 hr_port = ib_port - 1;
4672 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4673 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4675 if (is_roce_protocol) {
4676 gid_attr = attr->ah_attr.grh.sgid_attr;
4677 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4682 is_udp = (gid_attr->gid_type ==
4683 IB_GID_TYPE_ROCE_UDP_ENCAP);
4686 /* Only HIP08 needs to set the vlan_en bits in QPC */
4687 if (vlan_id < VLAN_N_VID &&
4688 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
4689 hr_reg_enable(context, QPC_RQ_VLAN_EN);
4690 hr_reg_clear(qpc_mask, QPC_RQ_VLAN_EN);
4691 hr_reg_enable(context, QPC_SQ_VLAN_EN);
4692 hr_reg_clear(qpc_mask, QPC_SQ_VLAN_EN);
4695 hr_reg_write(context, QPC_VLAN_ID, vlan_id);
4696 hr_reg_clear(qpc_mask, QPC_VLAN_ID);
4698 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4699 ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
4700 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4704 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4705 ibdev_err(ibdev, "ah attr is not RDMA roce type\n");
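/* For RoCE v2, derive the UDP source port from the flow label and the QP
 * numbers so that flows are spread across ECMP paths.
 */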
4709 hr_reg_write(context, QPC_UDPSPN,
4710 is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num,
4711 attr->dest_qp_num) :
4714 hr_reg_clear(qpc_mask, QPC_UDPSPN);
4716 hr_reg_write(context, QPC_GMV_IDX, grh->sgid_index);
4718 hr_reg_clear(qpc_mask, QPC_GMV_IDX);
4720 hr_reg_write(context, QPC_HOPLIMIT, grh->hop_limit);
4721 hr_reg_clear(qpc_mask, QPC_HOPLIMIT);
4723 ret = fill_cong_field(ibqp, attr, context, qpc_mask);
4727 hr_reg_write(context, QPC_TC, get_tclass(&attr->ah_attr.grh));
4728 hr_reg_clear(qpc_mask, QPC_TC);
4730 hr_reg_write(context, QPC_FL, grh->flow_label);
4731 hr_reg_clear(qpc_mask, QPC_FL);
4732 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4733 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4735 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4736 if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
4738 "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
4739 hr_qp->sl, MAX_SERVICE_LEVEL);
4743 hr_reg_write(context, QPC_SL, hr_qp->sl);
4744 hr_reg_clear(qpc_mask, QPC_SL);
4749 static bool check_qp_state(enum ib_qp_state cur_state,
4750 enum ib_qp_state new_state)
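/* QP state transitions supported by the hardware: sm[cur][new] is true when the transition is allowed. */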
4752 static const bool sm[][IB_QPS_ERR + 1] = {
4753 [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
4754 [IB_QPS_INIT] = true },
4755 [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
4756 [IB_QPS_INIT] = true,
4757 [IB_QPS_RTR] = true,
4758 [IB_QPS_ERR] = true },
4759 [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
4760 [IB_QPS_RTS] = true,
4761 [IB_QPS_ERR] = true },
4762 [IB_QPS_RTS] = { [IB_QPS_RESET] = true,
4763 [IB_QPS_RTS] = true,
4764 [IB_QPS_ERR] = true },
4767 [IB_QPS_ERR] = { [IB_QPS_RESET] = true,
4768 [IB_QPS_ERR] = true }
4771 return sm[cur_state][new_state];
4774 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4775 const struct ib_qp_attr *attr,
4777 enum ib_qp_state cur_state,
4778 enum ib_qp_state new_state,
4779 struct hns_roce_v2_qp_context *context,
4780 struct hns_roce_v2_qp_context *qpc_mask)
4782 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4785 if (!check_qp_state(cur_state, new_state)) {
4786 ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
4790 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4791 memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
4792 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4794 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4795 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4797 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4798 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4800 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4801 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4808 static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
4810 #define QP_ACK_TIMEOUT_MAX_HIP08 20
4811 #define QP_ACK_TIMEOUT_OFFSET 10
4812 #define QP_ACK_TIMEOUT_MAX 31
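/* HIP08 programs the local ACK timeout with a fixed offset of 10 and only
 * accepts values up to 20; later revisions accept the full IB range 0 to 31.
 */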
4814 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
4815 if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
4816 ibdev_warn(&hr_dev->ib_dev,
4817 "Local ACK timeout shall be 0 to 20.\n");
4820 *timeout += QP_ACK_TIMEOUT_OFFSET;
4821 } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
4822 if (*timeout > QP_ACK_TIMEOUT_MAX) {
4823 ibdev_warn(&hr_dev->ib_dev,
4824 "Local ACK timeout shall be 0 to 31.\n");
4832 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
4833 const struct ib_qp_attr *attr,
4835 struct hns_roce_v2_qp_context *context,
4836 struct hns_roce_v2_qp_context *qpc_mask)
4838 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4839 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4843 if (attr_mask & IB_QP_AV) {
4844 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
4850 if (attr_mask & IB_QP_TIMEOUT) {
4851 timeout = attr->timeout;
4852 if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
4853 hr_reg_write(context, QPC_AT, timeout);
4854 hr_reg_clear(qpc_mask, QPC_AT);
4858 if (attr_mask & IB_QP_RETRY_CNT) {
4859 hr_reg_write(context, QPC_RETRY_NUM_INIT, attr->retry_cnt);
4860 hr_reg_clear(qpc_mask, QPC_RETRY_NUM_INIT);
4862 hr_reg_write(context, QPC_RETRY_CNT, attr->retry_cnt);
4863 hr_reg_clear(qpc_mask, QPC_RETRY_CNT);
4866 if (attr_mask & IB_QP_RNR_RETRY) {
4867 hr_reg_write(context, QPC_RNR_NUM_INIT, attr->rnr_retry);
4868 hr_reg_clear(qpc_mask, QPC_RNR_NUM_INIT);
4870 hr_reg_write(context, QPC_RNR_CNT, attr->rnr_retry);
4871 hr_reg_clear(qpc_mask, QPC_RNR_CNT);
4874 if (attr_mask & IB_QP_SQ_PSN) {
4875 hr_reg_write(context, QPC_SQ_CUR_PSN, attr->sq_psn);
4876 hr_reg_clear(qpc_mask, QPC_SQ_CUR_PSN);
4878 hr_reg_write(context, QPC_SQ_MAX_PSN, attr->sq_psn);
4879 hr_reg_clear(qpc_mask, QPC_SQ_MAX_PSN);
4881 hr_reg_write(context, QPC_RETRY_MSG_PSN_L, attr->sq_psn);
4882 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_L);
4884 hr_reg_write(context, QPC_RETRY_MSG_PSN_H,
4885 attr->sq_psn >> RETRY_MSG_PSN_SHIFT);
4886 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_H);
4888 hr_reg_write(context, QPC_RETRY_MSG_FPKT_PSN, attr->sq_psn);
4889 hr_reg_clear(qpc_mask, QPC_RETRY_MSG_FPKT_PSN);
4891 hr_reg_write(context, QPC_RX_ACK_EPSN, attr->sq_psn);
4892 hr_reg_clear(qpc_mask, QPC_RX_ACK_EPSN);
4895 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4896 attr->max_dest_rd_atomic) {
4897 hr_reg_write(context, QPC_RR_MAX,
4898 fls(attr->max_dest_rd_atomic - 1));
4899 hr_reg_clear(qpc_mask, QPC_RR_MAX);
4902 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4903 hr_reg_write(context, QPC_SR_MAX, fls(attr->max_rd_atomic - 1));
4904 hr_reg_clear(qpc_mask, QPC_SR_MAX);
4907 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4908 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4910 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4911 hr_reg_write(context, QPC_MIN_RNR_TIME,
4912 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
4913 HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
4914 hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
4917 if (attr_mask & IB_QP_RQ_PSN) {
4918 hr_reg_write(context, QPC_RX_REQ_EPSN, attr->rq_psn);
4919 hr_reg_clear(qpc_mask, QPC_RX_REQ_EPSN);
4921 hr_reg_write(context, QPC_RAQ_PSN, attr->rq_psn - 1);
4922 hr_reg_clear(qpc_mask, QPC_RAQ_PSN);
4925 if (attr_mask & IB_QP_QKEY) {
4926 context->qkey_xrcd = cpu_to_le32(attr->qkey);
4927 qpc_mask->qkey_xrcd = 0;
4928 hr_qp->qkey = attr->qkey;
4934 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
4935 const struct ib_qp_attr *attr,
4938 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4939 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4941 if (attr_mask & IB_QP_ACCESS_FLAGS)
4942 hr_qp->atomic_rd_en = attr->qp_access_flags;
4944 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4945 hr_qp->resp_depth = attr->max_dest_rd_atomic;
4946 if (attr_mask & IB_QP_PORT) {
4947 hr_qp->port = attr->port_num - 1;
4948 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4952 static void clear_qp(struct hns_roce_qp *hr_qp)
4954 struct ib_qp *ibqp = &hr_qp->ibqp;
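/* After the QP has been reset, purge its CQEs from the send and recv CQs,
 * rewind the RQ doorbell record and reset the software queue indexes.
 */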
4957 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4960 if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)
4961 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),
4962 hr_qp->qpn, ibqp->srq ?
4963 to_hr_srq(ibqp->srq) : NULL);
4965 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
4966 *hr_qp->rdb.db_record = 0;
4972 hr_qp->next_sge = 0;
4975 static void v2_set_flushed_fields(struct ib_qp *ibqp,
4976 struct hns_roce_v2_qp_context *context,
4977 struct hns_roce_v2_qp_context *qpc_mask)
4979 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4980 unsigned long sq_flag = 0;
4981 unsigned long rq_flag = 0;
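/* Record the current SQ/RQ producer indexes in the QPC so that hardware can
 * generate flush CQEs for any outstanding WQEs once the QP is in error.
 */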
4983 if (ibqp->qp_type == IB_QPT_XRC_TGT)
4986 spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
4987 hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
4988 hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
4989 hr_qp->state = IB_QPS_ERR;
4990 spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
4992 if (ibqp->srq || ibqp->qp_type == IB_QPT_XRC_INI) /* no RQ */
4995 spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
4996 hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
4997 hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
4998 spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
5001 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
5002 const struct ib_qp_attr *attr,
5003 int attr_mask, enum ib_qp_state cur_state,
5004 enum ib_qp_state new_state)
5006 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5007 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5008 struct hns_roce_v2_qp_context ctx[2];
5009 struct hns_roce_v2_qp_context *context = ctx;
5010 struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
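/* ctx[0] holds the new context and ctx[1] the context mask; both halves are
 * copied into a single mailbox buffer when posted to hardware.
 */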
5011 struct ib_device *ibdev = &hr_dev->ib_dev;
5014 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
5018 * In the v2 engine, software passes the context and the context mask to
5019 * hardware when modifying a QP. For each field that software wants to
5020 * modify, all bits of that field in the context mask must be cleared to 0;
5021 * fields that must not change keep their mask bits set to 1.
5023 memset(context, 0, hr_dev->caps.qpc_sz);
5024 memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
5026 ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
5027 new_state, context, qpc_mask);
5031 /* When the QP state is error, the SQ and RQ WQEs should be flushed */
5032 if (new_state == IB_QPS_ERR)
5033 v2_set_flushed_fields(ibqp, context, qpc_mask);
5035 /* Configure the optional fields */
5036 ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
5041 hr_reg_write_bool(context, QPC_INV_CREDIT,
5042 to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC ||
5044 hr_reg_clear(qpc_mask, QPC_INV_CREDIT);
5046 /* Every state transition must update the QP state field */
5047 hr_reg_write(context, QPC_QP_ST, new_state);
5048 hr_reg_clear(qpc_mask, QPC_QP_ST);
5050 /* SW passes the context to HW */
5051 ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
5053 ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
5057 hr_qp->state = new_state;
5059 hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
5061 if (new_state == IB_QPS_RESET && !ibqp->uobject)
5068 static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
5070 static const enum ib_qp_state map[] = {
5071 [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
5072 [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
5073 [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
5074 [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
5075 [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
5076 [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
5077 [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
5078 [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
5081 return (state < ARRAY_SIZE(map)) ? map[state] : -1;
5084 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
5085 struct hns_roce_qp *hr_qp,
5086 struct hns_roce_v2_qp_context *hr_context)
5088 struct hns_roce_cmd_mailbox *mailbox;
5091 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5092 if (IS_ERR(mailbox))
5093 return PTR_ERR(mailbox);
5095 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
5096 HNS_ROCE_CMD_QUERY_QPC,
5097 HNS_ROCE_CMD_TIMEOUT_MSECS);
5101 memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
5104 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5108 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
5110 struct ib_qp_init_attr *qp_init_attr)
5112 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5113 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5114 struct hns_roce_v2_qp_context context = {};
5115 struct ib_device *ibdev = &hr_dev->ib_dev;
5120 memset(qp_attr, 0, sizeof(*qp_attr));
5121 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
5123 mutex_lock(&hr_qp->mutex);
5125 if (hr_qp->state == IB_QPS_RESET) {
5126 qp_attr->qp_state = IB_QPS_RESET;
5131 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
5133 ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
5138 state = hr_reg_read(&context, QPC_QP_ST);
5139 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
5140 if (tmp_qp_state == -1) {
5141 ibdev_err(ibdev, "Illegal ib_qp_state\n");
5145 hr_qp->state = (u8)tmp_qp_state;
5146 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
5147 qp_attr->path_mtu = (enum ib_mtu)hr_reg_read(&context, QPC_MTU);
5148 qp_attr->path_mig_state = IB_MIG_ARMED;
5149 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
5150 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
5151 qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
5153 qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN);
5154 qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN);
5155 qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN);
5156 qp_attr->qp_access_flags =
5157 ((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) |
5158 ((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) |
5159 ((hr_reg_read(&context, QPC_ATE)) << V2_QP_ATE_S);
5161 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
5162 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
5163 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
5164 struct ib_global_route *grh =
5165 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
5167 rdma_ah_set_sl(&qp_attr->ah_attr,
5168 hr_reg_read(&context, QPC_SL));
5169 grh->flow_label = hr_reg_read(&context, QPC_FL);
5170 grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX);
5171 grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT);
5172 grh->traffic_class = hr_reg_read(&context, QPC_TC);
5174 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
5177 qp_attr->port_num = hr_qp->port + 1;
5178 qp_attr->sq_draining = 0;
5179 qp_attr->max_rd_atomic = 1 << hr_reg_read(&context, QPC_SR_MAX);
5180 qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);
5182 qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
5183 qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
5184 qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
5185 qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);
5188 qp_attr->cur_qp_state = qp_attr->qp_state;
5189 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
5190 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
5191 qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
5193 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
5194 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
5196 qp_init_attr->qp_context = ibqp->qp_context;
5197 qp_init_attr->qp_type = ibqp->qp_type;
5198 qp_init_attr->recv_cq = ibqp->recv_cq;
5199 qp_init_attr->send_cq = ibqp->send_cq;
5200 qp_init_attr->srq = ibqp->srq;
5201 qp_init_attr->cap = qp_attr->cap;
5202 qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
5205 mutex_unlock(&hr_qp->mutex);
5209 static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp)
5211 return ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
5212 hr_qp->ibqp.qp_type == IB_QPT_UD ||
5213 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
5214 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
5215 hr_qp->state != IB_QPS_RESET);
5218 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
5219 struct hns_roce_qp *hr_qp,
5220 struct ib_udata *udata)
5222 struct ib_device *ibdev = &hr_dev->ib_dev;
5223 struct hns_roce_cq *send_cq, *recv_cq;
5224 unsigned long flags;
5227 if (modify_qp_is_ok(hr_qp)) {
5228 /* Modify the QP to RESET before destroying it */
5229 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
5230 hr_qp->state, IB_QPS_RESET);
5233 "failed to modify QP to RST, ret = %d.\n",
5237 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
5238 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
5240 spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
5241 hns_roce_lock_cqs(send_cq, recv_cq);
5245 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
5247 to_hr_srq(hr_qp->ibqp.srq) :
5250 if (send_cq && send_cq != recv_cq)
5251 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
5254 hns_roce_qp_remove(hr_dev, hr_qp);
5256 hns_roce_unlock_cqs(send_cq, recv_cq);
5257 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
5262 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
5264 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5265 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5268 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
5270 ibdev_err(&hr_dev->ib_dev,
5271 "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
5274 hns_roce_qp_destroy(hr_dev, hr_qp, udata);
5279 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
5280 struct hns_roce_qp *hr_qp)
5282 struct ib_device *ibdev = &hr_dev->ib_dev;
5283 struct hns_roce_sccc_clr_done *resp;
5284 struct hns_roce_sccc_clr *clr;
5285 struct hns_roce_cmq_desc desc;
5288 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
5291 mutex_lock(&hr_dev->qp_table.scc_mutex);
5293 /* set scc ctx clear done flag */
5294 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
5295 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5297 ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
5301 /* clear scc context */
5302 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
5303 clr = (struct hns_roce_sccc_clr *)desc.data;
5304 clr->qpn = cpu_to_le32(hr_qp->qpn);
5305 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5307 ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
5311 /* query whether the SCC context clear has completed */
5312 resp = (struct hns_roce_sccc_clr_done *)desc.data;
5313 for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
5314 hns_roce_cmq_setup_basic_desc(&desc,
5315 HNS_ROCE_OPC_QUERY_SCCC, true);
5316 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5318 ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
5329 ibdev_err(ibdev, "Query SCC clr done flag timed out.\n");
5333 mutex_unlock(&hr_dev->qp_table.scc_mutex);
5337 #define DMA_IDX_SHIFT 3
5338 #define DMA_WQE_SHIFT 3
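/* SRQC base-table addresses are programmed in 8-byte units, hence the 3-bit shifts above. */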
5340 static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
5341 struct hns_roce_srq_context *ctx)
5343 struct hns_roce_idx_que *idx_que = &srq->idx_que;
5344 struct ib_device *ibdev = srq->ibsrq.device;
5345 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
5346 u64 mtts_idx[MTT_MIN_COUNT] = {};
5347 dma_addr_t dma_handle_idx = 0;
5350 /* Get physical address of idx que buf */
5351 ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
5352 ARRAY_SIZE(mtts_idx), &dma_handle_idx);
5354 ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
5359 hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
5360 to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
5362 hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT);
5363 hr_reg_write(ctx, SRQC_IDX_BT_BA_H,
5364 upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));
5366 hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
5367 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
5368 hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
5369 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));
5371 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
5372 to_hr_hw_page_addr(mtts_idx[0]));
5373 hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H,
5374 upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
5376 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L,
5377 to_hr_hw_page_addr(mtts_idx[1]));
5378 hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H,
5379 upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
5384 static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
5386 struct ib_device *ibdev = srq->ibsrq.device;
5387 struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
5388 struct hns_roce_srq_context *ctx = mb_buf;
5389 u64 mtts_wqe[MTT_MIN_COUNT] = {};
5390 dma_addr_t dma_handle_wqe = 0;
5393 memset(ctx, 0, sizeof(*ctx));
5395 /* Get the physical address of srq buf */
5396 ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
5397 ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
5399 ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
5404 hr_reg_write(ctx, SRQC_SRQ_ST, 1);
5405 hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
5406 srq->ibsrq.srq_type == IB_SRQT_XRC);
5407 hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
5408 hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
5409 hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
5410 hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
5411 hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
5412 hr_reg_write(ctx, SRQC_RQWS,
5413 srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1));
5415 hr_reg_write(ctx, SRQC_WQE_HOP_NUM,
5416 to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
5419 hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT);
5420 hr_reg_write(ctx, SRQC_WQE_BT_BA_H,
5421 upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));
5423 hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
5424 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
5425 hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
5426 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
5428 return hns_roce_v2_write_srqc_index_queue(srq, ctx);
5431 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5432 struct ib_srq_attr *srq_attr,
5433 enum ib_srq_attr_mask srq_attr_mask,
5434 struct ib_udata *udata)
5436 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5437 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5438 struct hns_roce_srq_context *srq_context;
5439 struct hns_roce_srq_context *srqc_mask;
5440 struct hns_roce_cmd_mailbox *mailbox;
5443 /* Resizing SRQs is not supported yet */
5444 if (srq_attr_mask & IB_SRQ_MAX_WR)
5447 if (srq_attr_mask & IB_SRQ_LIMIT) {
5448 if (srq_attr->srq_limit > srq->wqe_cnt)
5451 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5452 if (IS_ERR(mailbox))
5453 return PTR_ERR(mailbox);
5455 srq_context = mailbox->buf;
5456 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5458 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5460 hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit);
5461 hr_reg_clear(srqc_mask, SRQC_LIMIT_WL);
5463 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
5464 HNS_ROCE_CMD_MODIFY_SRQC,
5465 HNS_ROCE_CMD_TIMEOUT_MSECS);
5466 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5468 ibdev_err(&hr_dev->ib_dev,
5469 "failed to handle cmd of modifying SRQ, ret = %d.\n",
5478 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
5480 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5481 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5482 struct hns_roce_srq_context *srq_context;
5483 struct hns_roce_cmd_mailbox *mailbox;
5486 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5487 if (IS_ERR(mailbox))
5488 return PTR_ERR(mailbox);
5490 srq_context = mailbox->buf;
5491 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
5492 HNS_ROCE_CMD_QUERY_SRQC,
5493 HNS_ROCE_CMD_TIMEOUT_MSECS);
5495 ibdev_err(&hr_dev->ib_dev,
5496 "failed to process cmd of querying SRQ, ret = %d.\n",
5501 attr->srq_limit = hr_reg_read(srq_context, SRQC_LIMIT_WL);
5502 attr->max_wr = srq->wqe_cnt;
5503 attr->max_sge = srq->max_gs - srq->rsv_sge;
5506 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5510 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
5512 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
5513 struct hns_roce_v2_cq_context *cq_context;
5514 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
5515 struct hns_roce_v2_cq_context *cqc_mask;
5516 struct hns_roce_cmd_mailbox *mailbox;
5519 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5520 if (IS_ERR(mailbox))
5521 return PTR_ERR(mailbox);
5523 cq_context = mailbox->buf;
5524 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
5526 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
5528 hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
5529 hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
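/* On HIP08 the CQ period is programmed in HNS_ROCE_CLOCK_ADJUST units and
 * must fit into a 16-bit CQC field, so oversized values are clamped to
 * HNS_ROCE_MAX_CQ_PERIOD.
 */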
5531 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
5532 if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
5533 dev_info(hr_dev->dev,
5534 "cq_period(%u) reached the upper limit, adjusted to 65.\n",
5536 cq_period = HNS_ROCE_MAX_CQ_PERIOD;
5538 cq_period *= HNS_ROCE_CLOCK_ADJUST;
5540 hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
5541 hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
5543 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
5544 HNS_ROCE_CMD_MODIFY_CQC,
5545 HNS_ROCE_CMD_TIMEOUT_MSECS);
5546 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5548 ibdev_err(&hr_dev->ib_dev,
5549 "failed to process cmd when modifying CQ, ret = %d.\n",
5555 static void hns_roce_irq_work_handle(struct work_struct *work)
5557 struct hns_roce_work *irq_work =
5558 container_of(work, struct hns_roce_work, work);
5559 struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
5561 switch (irq_work->event_type) {
5562 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5563 ibdev_info(ibdev, "Path migration succeeded.\n");
5565 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5566 ibdev_warn(ibdev, "Path migration failed.\n");
5568 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5570 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5571 ibdev_warn(ibdev, "Send queue drained.\n");
5573 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5574 ibdev_err(ibdev, "Local work queue 0x%x catastrophic error, sub_event type is: %d\n",
5575 irq_work->queue_num, irq_work->sub_type);
5577 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5578 ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
5579 irq_work->queue_num);
5581 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5582 ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
5583 irq_work->queue_num, irq_work->sub_type);
5585 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5586 ibdev_warn(ibdev, "SRQ limit reached.\n");
5588 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5589 ibdev_warn(ibdev, "SRQ last WQE reached.\n");
5591 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5592 ibdev_err(ibdev, "SRQ catastrophic error.\n");
5594 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5595 ibdev_err(ibdev, "CQ 0x%x access err.\n", irq_work->queue_num);
5597 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5598 ibdev_warn(ibdev, "CQ 0x%x overflow\n", irq_work->queue_num);
5600 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5601 ibdev_warn(ibdev, "DB overflow.\n");
5603 case HNS_ROCE_EVENT_TYPE_FLR:
5604 ibdev_warn(ibdev, "Function level reset.\n");
5606 case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
5607 ibdev_err(ibdev, "xrc domain violation error.\n");
5609 case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
5610 ibdev_err(ibdev, "invalid xrceth error.\n");
5619 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
5620 struct hns_roce_eq *eq, u32 queue_num)
5622 struct hns_roce_work *irq_work;
5624 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
5628 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
5629 irq_work->hr_dev = hr_dev;
5630 irq_work->event_type = eq->event_type;
5631 irq_work->sub_type = eq->sub_type;
5632 irq_work->queue_num = queue_num;
5633 queue_work(hr_dev->irq_workq, &(irq_work->work));
5636 static void update_eq_db(struct hns_roce_eq *eq)
5638 struct hns_roce_dev *hr_dev = eq->hr_dev;
5639 struct hns_roce_v2_db eq_db = {};
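/* Build the EQ doorbell: select the arm/rearm command according to the EQ
 * type and arm state, then report the consumer index back to hardware.
 */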
5641 if (eq->type_flag == HNS_ROCE_AEQ) {
5642 hr_reg_write(&eq_db, EQ_DB_CMD,
5643 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5644 HNS_ROCE_EQ_DB_CMD_AEQ :
5645 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
5647 hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn);
5649 hr_reg_write(&eq_db, EQ_DB_CMD,
5650 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5651 HNS_ROCE_EQ_DB_CMD_CEQ :
5652 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
5655 hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index);
5657 hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
5660 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5662 struct hns_roce_aeqe *aeqe;
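/* An AEQE is owned by software when its owner bit differs from the wrap
 * parity of the consumer index (cons_index & entries); otherwise the queue
 * is empty.
 */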
5664 aeqe = hns_roce_buf_offset(eq->mtr.kmem,
5665 (eq->cons_index & (eq->entries - 1)) *
5668 return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
5669 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5672 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
5673 struct hns_roce_eq *eq)
5675 struct device *dev = hr_dev->dev;
5676 struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
5683 /* Make sure we read the AEQ entry only after we have checked the ownership bit. */
5688 event_type = roce_get_field(aeqe->asyn,
5689 HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
5690 HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
5691 sub_type = roce_get_field(aeqe->asyn,
5692 HNS_ROCE_V2_AEQE_SUB_TYPE_M,
5693 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
5694 queue_num = roce_get_field(aeqe->event.queue_event.num,
5695 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5696 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5698 switch (event_type) {
5699 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5700 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5701 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5702 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5703 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5704 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5705 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5706 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5707 case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
5708 case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
5709 hns_roce_qp_event(hr_dev, queue_num, event_type);
5711 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5712 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5713 hns_roce_srq_event(hr_dev, queue_num, event_type);
5715 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5716 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5717 hns_roce_cq_event(hr_dev, queue_num, event_type);
5719 case HNS_ROCE_EVENT_TYPE_MB:
5720 hns_roce_cmd_event(hr_dev,
5721 le16_to_cpu(aeqe->event.cmd.token),
5722 aeqe->event.cmd.status,
5723 le64_to_cpu(aeqe->event.cmd.out_param));
5725 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5726 case HNS_ROCE_EVENT_TYPE_FLR:
5729 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
5730 event_type, eq->eqn, eq->cons_index);
5734 eq->event_type = event_type;
5735 eq->sub_type = sub_type;
5739 hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
5741 aeqe = next_aeqe_sw_v2(eq);
5748 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5750 struct hns_roce_ceqe *ceqe;
5752 ceqe = hns_roce_buf_offset(eq->mtr.kmem,
5753 (eq->cons_index & (eq->entries - 1)) *
5756 return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
5757 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5760 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5761 struct hns_roce_eq *eq)
5763 struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5768 /* Make sure we read the CEQ entry only after we have checked the ownership bit. */
5773 cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
5774 HNS_ROCE_V2_CEQE_COMP_CQN_S);
5776 hns_roce_cq_completion(hr_dev, cqn);
5781 ceqe = next_ceqe_sw_v2(eq);
5789 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5791 struct hns_roce_eq *eq = eq_ptr;
5792 struct hns_roce_dev *hr_dev = eq->hr_dev;
5795 if (eq->type_flag == HNS_ROCE_CEQ)
5796 /* Completion event interrupt */
5797 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5799 /* Asynchronous event interrupt */
5800 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5802 return IRQ_RETVAL(int_work);
5805 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5807 struct hns_roce_dev *hr_dev = dev_id;
5808 struct device *dev = hr_dev->dev;
5813 /* Abnormal interrupt */
5814 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5815 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5817 if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
5818 struct pci_dev *pdev = hr_dev->pci_dev;
5819 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5820 const struct hnae3_ae_ops *ops = ae_dev->ops;
5822 dev_err(dev, "AEQ overflow!\n");
5824 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
5825 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5827 /* Set reset level for reset_event() */
5828 if (ops->set_default_reset_request)
5829 ops->set_default_reset_request(ae_dev,
5831 if (ops->reset_event)
5832 ops->reset_event(pdev, NULL);
5834 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5835 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5838 } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_RAS_INT_S)) {
5839 dev_err(dev, "RAS interrupt!\n");
5841 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_RAS_INT_S;
5842 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5844 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5845 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5849 dev_err(dev, "No abnormal irq found!\n");
5852 return IRQ_RETVAL(int_work);
5855 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5856 int eq_num, u32 enable_flag)
5860 for (i = 0; i < eq_num; i++)
5861 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5862 i * EQ_REG_OFFSET, enable_flag);
5864 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
5865 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
5868 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
5870 struct device *dev = hr_dev->dev;
5873 if (eqn < hr_dev->caps.num_comp_vectors)
5874 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5875 0, HNS_ROCE_CMD_DESTROY_CEQC,
5876 HNS_ROCE_CMD_TIMEOUT_MSECS);
5878 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5879 0, HNS_ROCE_CMD_DESTROY_AEQC,
5880 HNS_ROCE_CMD_TIMEOUT_MSECS);
5882 dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
5885 static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5887 hns_roce_mtr_destroy(hr_dev, &eq->mtr);
5890 static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5892 eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
5894 eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
5895 eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
5896 eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
5897 eq->shift = ilog2((unsigned int)eq->entries);
5900 static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
5903 u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
5904 struct hns_roce_eq_context *eqc;
5909 memset(eqc, 0, sizeof(struct hns_roce_eq_context));
5911 init_eq_config(hr_dev, eq);
5913 /* If not multi-hop, the EQE buffer uses only one contiguous region */
5914 count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
5917 dev_err(hr_dev->dev, "failed to find EQE mtr\n");
5921 hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
5922 hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
5923 hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
5924 hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
5925 hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
5926 hr_reg_write(eqc, EQC_EQN, eq->eqn);
5927 hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
5928 hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
5929 to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
5930 hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
5931 to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
5932 hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
5933 hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
5935 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
5936 if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
5937 dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
5939 eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
5941 eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
5944 hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
5945 hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
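/* The EQE base-table address and the current/next EQE block addresses are
 * split across low/middle/high EQC fields at hardware-defined bit offsets.
 */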
5946 hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
5947 hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
5948 hr_reg_write(eqc, EQC_SHIFT, eq->shift);
5949 hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
5950 hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
5951 hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
5952 hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
5953 hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
5954 hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
5955 hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
5956 hr_reg_write(eqc, EQC_EQE_SIZE, eq->eqe_size == HNS_ROCE_V3_EQE_SIZE);
5961 static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5963 struct hns_roce_buf_attr buf_attr = {};
5966 if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
5969 eq->hop_num = hr_dev->caps.eqe_hop_num;
5971 buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT;
5972 buf_attr.region[0].size = eq->entries * eq->eqe_size;
5973 buf_attr.region[0].hopnum = eq->hop_num;
5974 buf_attr.region_count = 1;
5976 err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
5977 hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
5980 dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
5985 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5986 struct hns_roce_eq *eq,
5987 unsigned int eq_cmd)
5989 struct hns_roce_cmd_mailbox *mailbox;
5992 /* Allocate mailbox memory */
5993 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5994 if (IS_ERR_OR_NULL(mailbox))
5997 ret = alloc_eq_buf(hr_dev, eq);
6001 ret = config_eqc(hr_dev, eq, mailbox->buf);
6005 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
6006 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
6008 dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
6012 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6017 free_eq_buf(hr_dev, eq);
6020 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6025 static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
6026 int comp_num, int aeq_num, int other_num)
6028 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6032 for (i = 0; i < irq_num; i++) {
6033 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
6035 if (!hr_dev->irq_names[i]) {
6037 goto err_kzalloc_failed;
6041 /* The irq vector order is: abnormal + AEQ + CEQ */
6042 for (j = 0; j < other_num; j++)
6043 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6046 for (j = other_num; j < (other_num + aeq_num); j++)
6047 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6048 "hns-aeq-%d", j - other_num);
6050 for (j = (other_num + aeq_num); j < irq_num; j++)
6051 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6052 "hns-ceq-%d", j - other_num - aeq_num);
6054 for (j = 0; j < irq_num; j++) {
6056 ret = request_irq(hr_dev->irq[j],
6057 hns_roce_v2_msix_interrupt_abn,
6058 0, hr_dev->irq_names[j], hr_dev);
6060 else if (j < (other_num + comp_num))
6061 ret = request_irq(eq_table->eq[j - other_num].irq,
6062 hns_roce_v2_msix_interrupt_eq,
6063 0, hr_dev->irq_names[j + aeq_num],
6064 &eq_table->eq[j - other_num]);
6066 ret = request_irq(eq_table->eq[j - other_num].irq,
6067 hns_roce_v2_msix_interrupt_eq,
6068 0, hr_dev->irq_names[j - comp_num],
6069 &eq_table->eq[j - other_num]);
6071 dev_err(hr_dev->dev, "Request irq error!\n");
6072 goto err_request_failed;
6079 for (j -= 1; j >= 0; j--)
6081 free_irq(hr_dev->irq[j], hr_dev);
6083 free_irq(eq_table->eq[j - other_num].irq,
6084 &eq_table->eq[j - other_num]);
6087 for (i -= 1; i >= 0; i--)
6088 kfree(hr_dev->irq_names[i]);
6093 static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
6099 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6100 irq_num = eq_num + hr_dev->caps.num_other_vectors;
6102 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
6103 free_irq(hr_dev->irq[i], hr_dev);
6105 for (i = 0; i < eq_num; i++)
6106 free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
6108 for (i = 0; i < irq_num; i++)
6109 kfree(hr_dev->irq_names[i]);
6112 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
6114 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6115 struct device *dev = hr_dev->dev;
6116 struct hns_roce_eq *eq;
6117 unsigned int eq_cmd;
6126 other_num = hr_dev->caps.num_other_vectors;
6127 comp_num = hr_dev->caps.num_comp_vectors;
6128 aeq_num = hr_dev->caps.num_aeq_vectors;
6130 eq_num = comp_num + aeq_num;
6131 irq_num = eq_num + other_num;
6133 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
6138 for (i = 0; i < eq_num; i++) {
6139 eq = &eq_table->eq[i];
6140 eq->hr_dev = hr_dev;
6144 eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
6145 eq->type_flag = HNS_ROCE_CEQ;
6146 eq->entries = hr_dev->caps.ceqe_depth;
6147 eq->eqe_size = hr_dev->caps.ceqe_size;
6148 eq->irq = hr_dev->irq[i + other_num + aeq_num];
6149 eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
6150 eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
6153 eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
6154 eq->type_flag = HNS_ROCE_AEQ;
6155 eq->entries = hr_dev->caps.aeqe_depth;
6156 eq->eqe_size = hr_dev->caps.aeqe_size;
6157 eq->irq = hr_dev->irq[i - comp_num + other_num];
6158 eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
6159 eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
6162 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
6164 dev_err(dev, "failed to create eq.\n");
6165 goto err_create_eq_fail;
6169 hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
6170 if (!hr_dev->irq_workq) {
6171 dev_err(dev, "failed to create irq workqueue.\n");
6173 goto err_create_eq_fail;
6176 ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num, aeq_num,
6179 dev_err(dev, "failed to request irq.\n");
6180 goto err_request_irq_fail;
6184 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
6188 err_request_irq_fail:
6189 destroy_workqueue(hr_dev->irq_workq);
6192 for (i -= 1; i >= 0; i--)
6193 free_eq_buf(hr_dev, &eq_table->eq[i]);
6194 kfree(eq_table->eq);
static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int eq_num, i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;

	/* Disable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);

	__hns_roce_free_irq(hr_dev);
	destroy_workqueue(hr_dev->irq_workq);

	for (i = 0; i < eq_num; i++) {
		hns_roce_v2_destroy_eqc(hr_dev, i);
		free_eq_buf(hr_dev, &eq_table->eq[i]);
	}

	kfree(eq_table->eq);
}

static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
	.query_cqc_info = hns_roce_v2_query_cqc_info,
};

static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};

static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.query_srq = hns_roce_v2_query_srq,
};

static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.hw_init = hns_roce_v2_init,
	.hw_exit = hns_roce_v2_exit,
	.post_mbox = v2_post_mbox,
	.poll_mbox_done = v2_poll_mbox_done,
	.chk_mbox_avail = v2_chk_mbox_is_avail,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
	.write_srqc = hns_roce_v2_write_srqc,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};

static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
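
/* Fill the RoCE device from the hnae3 handle shared with the NIC driver:
 * the PCI device, register and memory bases, netdev, MSI-X vectors and
 * reset counter all come from the bound Ethernet function.
 */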
static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
				   struct hnae3_handle *handle)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	const struct pci_device_id *id;
	int i;

	hr_dev->pci_dev = handle->pdev;
	id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
	hr_dev->is_vf = id->driver_data;
	hr_dev->dev = &handle->pdev->dev;
	hr_dev->hw = &hns_roce_hw_v2;
	hr_dev->dfx = &hns_roce_dfx_hw_v2;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = hr_dev->sdb_offset;

	/* Get info from NIC driver. */
	hr_dev->reg_base = handle->rinfo.roce_io_base;
	hr_dev->mem_base = handle->rinfo.roce_mem_base;
	hr_dev->caps.num_ports = 1;
	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
	hr_dev->iboe.phy_port[0] = 0;

	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
			    hr_dev->iboe.netdevs[0]->dev_addr);

	for (i = 0; i < handle->rinfo.num_vectors; i++)
		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
						i + handle->rinfo.base_vector);

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
	priv->handle = handle;
}
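
/* Allocate the IB device and the hw-specific private data, pull the
 * configuration from the hnae3 handle and bring the RoCE engine up,
 * unwinding in reverse order on failure.
 */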
static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hns_roce_hw_v2_get_cfg(hr_dev, handle);
	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_get_cfg;
	}

	handle->priv = hr_dev;
	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);
error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);
	return ret;
}

static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					     bool reset)
{
	struct hns_roce_dev *hr_dev = handle->priv;

	if (!hr_dev)
		return;

	handle->priv = NULL;
	hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
	hns_roce_handle_device_err(hr_dev);
	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);
}
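
/* hnae3 client entry point: skip initialization while the device is
 * resetting; the VF device id carries non-zero driver_data and RoCE is
 * not supported on HIP08-revision VFs, so those are skipped as well.
 */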
static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	const struct pci_device_id *id;
	struct device *dev = &handle->pdev->dev;
	int ret;

	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
	if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		goto reset_chk_err;
	}
	id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
	if (!id)
		return 0;
	if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
		return 0;
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
		if (ops->ae_dev_resetting(handle) ||
		    ops->get_hw_reset_stat(handle))
			goto reset_chk_err;
		return ret;
	}
	handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
	return 0;

reset_chk_err:
	dev_err(dev, "Device is busy in resetting state.\n"
		     "please retry later.\n");
	return -EBUSY;
}

static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					   bool reset)
{
	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
		return;

	handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
	__hns_roce_hw_v2_uninit_instance(handle, reset);
	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}
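
/* Reset flow: the NIC driver notifies DOWN before the hardware reset,
 * UNINIT to tear the old instance down and INIT to re-create it afterwards;
 * HNS_ROCE_RST_DIRECT_RETURN lets the later stages return early when the
 * instance was never brought up.
 */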
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;

	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

	hr_dev = handle->priv;
	if (!hr_dev)
		return 0;
	hr_dev->active = false;
	hr_dev->dis_db = true;
	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
	return 0;
}

static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	struct device *dev = &handle->pdev->dev;
	int ret;

	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
			       &handle->rinfo.state)) {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

	dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE
		 * engine is reinitialized in this callback. If the reinit
		 * fails, clear handle->priv and report the error so that the
		 * NIC driver is informed.
		 */
		handle->priv = NULL;
		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
	} else {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		dev_info(dev, "Reset done, RoCE client reinit finished.\n");
	}

	return ret;
}

static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
		return 0;

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
	dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
	msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
	__hns_roce_hw_v2_uninit_instance(handle, false);
	return 0;
}

static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
				       enum hnae3_reset_notify_type type)
{
	switch (type) {
	case HNAE3_DOWN_CLIENT:
		return hns_roce_hw_v2_reset_notify_down(handle);
	case HNAE3_INIT_CLIENT:
		return hns_roce_hw_v2_reset_notify_init(handle);
	case HNAE3_UNINIT_CLIENT:
		return hns_roce_hw_v2_reset_notify_uninit(handle);
	default:
		return 0;
	}
}
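
/* Callbacks registered with the hnae3 framework; the NIC driver invokes
 * them when the RoCE client is attached or detached and when the function
 * is reset.
 */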
static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};

static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");