2 * Copyright (c) 2016-2017 Hisilicon Limited.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
33 #include <linux/acpi.h>
34 #include <linux/etherdevice.h>
35 #include <linux/interrupt.h>
36 #include <linux/kernel.h>
37 #include <linux/types.h>
38 #include <net/addrconf.h>
39 #include <rdma/ib_umem.h>
42 #include "hns_roce_common.h"
43 #include "hns_roce_device.h"
44 #include "hns_roce_cmd.h"
45 #include "hns_roce_hem.h"
46 #include "hns_roce_hw_v2.h"
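/*
 * Descriptive note: set_data_seg_v2() below fills one WQE data segment
 * from an ib_sge - the local key, the DMA address and the buffer length,
 * each converted to little endian for the hardware.
 */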
48 static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
51 dseg->lkey = cpu_to_le32(sg->lkey);
52 dseg->addr = cpu_to_le64(sg->addr);
53 dseg->len = cpu_to_le32(sg->length);
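/*
 * Descriptive note: set_extend_sge() writes the SGEs that do not fit into
 * the WQE itself into the QP's extended SGE area. The area is indexed by
 * *sge_ind modulo qp->sge.sge_cnt, and the copy is split in two when the
 * remaining room before a buffer-page boundary cannot hold all of the
 * extra SGEs.
 */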
56 static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
57 unsigned int *sge_ind)
59 struct hns_roce_v2_wqe_data_seg *dseg;
68 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
69 num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
70 extend_sge_num = wr->num_sge - num_in_wqe;
71 sg = wr->sg_list + num_in_wqe;
72 shift = qp->hr_buf.page_shift;
75 * Check whether wr->num_sge SGEs are in the same page. If not, we
76 * should calculate how many SGEs are in the first page and how many in the second.
79 dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
80 fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
82 sizeof(struct hns_roce_v2_wqe_data_seg);
83 if (extend_sge_num > fi_sge_num) {
84 se_sge_num = extend_sge_num - fi_sge_num;
85 for (i = 0; i < fi_sge_num; i++) {
86 set_data_seg_v2(dseg++, sg + i);
89 dseg = get_send_extend_sge(qp,
90 (*sge_ind) & (qp->sge.sge_cnt - 1));
91 for (i = 0; i < se_sge_num; i++) {
92 set_data_seg_v2(dseg++, sg + fi_sge_num + i);
96 for (i = 0; i < extend_sge_num; i++) {
97 set_data_seg_v2(dseg++, sg + i);
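/*
 * Descriptive note: set_rwqe_data_seg() builds the data portion of an
 * RC/UC send WQE. For IB_SEND_INLINE the payload is copied directly
 * behind the WQE header (RDMA READ may not carry inline data); otherwise
 * the first HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE SGEs are placed in the WQE
 * and any remaining SGEs go to the extended SGE area via set_extend_sge().
 */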
103 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
104 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
105 void *wqe, unsigned int *sge_ind,
106 struct ib_send_wr **bad_wr)
108 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
109 struct hns_roce_v2_wqe_data_seg *dseg = wqe;
110 struct hns_roce_qp *qp = to_hr_qp(ibqp);
113 if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
114 if (le32_to_cpu(rc_sq_wqe->msg_len) >
115 hr_dev->caps.max_sq_inline) {
117 dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal\n",
118 hr_dev->caps.max_sq_inline, le32_to_cpu(rc_sq_wqe->msg_len));
122 if (wr->opcode == IB_WR_RDMA_READ) {
123 dev_err(hr_dev->dev, "Inline data is not supported!\n");
127 for (i = 0; i < wr->num_sge; i++) {
128 memcpy(wqe, ((void *)wr->sg_list[i].addr),
129 wr->sg_list[i].length);
130 wqe += wr->sg_list[i].length;
133 roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
136 if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
137 for (i = 0; i < wr->num_sge; i++) {
138 if (likely(wr->sg_list[i].length)) {
139 set_data_seg_v2(dseg, wr->sg_list + i);
144 roce_set_field(rc_sq_wqe->byte_20,
145 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
146 V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
147 (*sge_ind) & (qp->sge.sge_cnt - 1));
149 for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
150 if (likely(wr->sg_list[i].length)) {
151 set_data_seg_v2(dseg, wr->sg_list + i);
156 set_extend_sge(qp, wr, sge_ind);
159 roce_set_field(rc_sq_wqe->byte_16,
160 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
161 V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
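/*
 * Descriptive note: hns_roce_v2_post_send() is the verbs post_send hook.
 * Under the SQ lock it validates the QP type and state, builds a UD or RC
 * send WQE for every work request and finally presses the SQ doorbell
 * with the new head pointer so the hardware starts processing the queue.
 */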
167 static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
168 struct ib_send_wr **bad_wr)
170 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
171 struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
172 struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
173 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
174 struct hns_roce_qp *qp = to_hr_qp(ibqp);
175 struct device *dev = hr_dev->dev;
176 struct hns_roce_v2_db sq_db;
177 unsigned int sge_ind = 0;
178 unsigned int owner_bit;
189 if (unlikely(ibqp->qp_type != IB_QPT_RC &&
190 ibqp->qp_type != IB_QPT_GSI &&
191 ibqp->qp_type != IB_QPT_UD)) {
192 dev_err(dev, "QP type 0x%x is not supported!\n", ibqp->qp_type);
197 if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
198 qp->state == IB_QPS_RTR)) {
199 dev_err(dev, "Failed to post WQE, QP state %d is invalid!\n", qp->state);
204 spin_lock_irqsave(&qp->sq.lock, flags);
205 ind = qp->sq_next_wqe;
206 sge_ind = qp->next_sge;
208 for (nreq = 0; wr; ++nreq, wr = wr->next) {
209 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
215 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
216 dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
217 wr->num_sge, qp->sq.max_gs);
223 wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
224 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
228 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
231 /* Build the WQE differently according to the QP type */
232 if (ibqp->qp_type == IB_QPT_GSI) {
234 memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
236 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
237 V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
238 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
239 V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
240 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
241 V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
242 roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
243 V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
244 roce_set_field(ud_sq_wqe->byte_48,
245 V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
246 V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
248 roce_set_field(ud_sq_wqe->byte_48,
249 V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
250 V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
254 smac = (u8 *)hr_dev->dev_addr[qp->port];
255 loopback = ether_addr_equal_unaligned(ah->av.mac,
258 roce_set_bit(ud_sq_wqe->byte_40,
259 V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
261 roce_set_field(ud_sq_wqe->byte_4,
262 V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
263 V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
264 HNS_ROCE_V2_WQE_OP_SEND);
266 for (i = 0; i < wr->num_sge; i++)
267 tmp_len += wr->sg_list[i].length;
270 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
272 switch (wr->opcode) {
273 case IB_WR_SEND_WITH_IMM:
274 case IB_WR_RDMA_WRITE_WITH_IMM:
275 ud_sq_wqe->immtdata =
276 cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
279 ud_sq_wqe->immtdata = 0;
284 roce_set_bit(ud_sq_wqe->byte_4,
285 V2_UD_SEND_WQE_BYTE_4_CQE_S,
286 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
289 roce_set_bit(ud_sq_wqe->byte_4,
290 V2_UD_SEND_WQE_BYTE_4_SE_S,
291 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
293 roce_set_bit(ud_sq_wqe->byte_4,
294 V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
296 roce_set_field(ud_sq_wqe->byte_16,
297 V2_UD_SEND_WQE_BYTE_16_PD_M,
298 V2_UD_SEND_WQE_BYTE_16_PD_S,
299 to_hr_pd(ibqp->pd)->pdn);
301 roce_set_field(ud_sq_wqe->byte_16,
302 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
303 V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
306 roce_set_field(ud_sq_wqe->byte_20,
307 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
308 V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
309 sge_ind & (qp->sge.sge_cnt - 1));
311 roce_set_field(ud_sq_wqe->byte_24,
312 V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
313 V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
315 cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
316 qp->qkey : ud_wr(wr)->remote_qkey);
317 roce_set_field(ud_sq_wqe->byte_32,
318 V2_UD_SEND_WQE_BYTE_32_DQPN_M,
319 V2_UD_SEND_WQE_BYTE_32_DQPN_S,
320 ud_wr(wr)->remote_qpn);
322 roce_set_field(ud_sq_wqe->byte_36,
323 V2_UD_SEND_WQE_BYTE_36_VLAN_M,
324 V2_UD_SEND_WQE_BYTE_36_VLAN_S,
325 le16_to_cpu(ah->av.vlan));
326 roce_set_field(ud_sq_wqe->byte_36,
327 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
328 V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
330 roce_set_field(ud_sq_wqe->byte_36,
331 V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
332 V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
334 roce_set_field(ud_sq_wqe->byte_36,
335 V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
336 V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
338 roce_set_field(ud_sq_wqe->byte_40,
339 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
340 V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, 0);
341 roce_set_field(ud_sq_wqe->byte_40,
342 V2_UD_SEND_WQE_BYTE_40_SL_M,
343 V2_UD_SEND_WQE_BYTE_40_SL_S,
344 le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
346 roce_set_field(ud_sq_wqe->byte_40,
347 V2_UD_SEND_WQE_BYTE_40_PORTN_M,
348 V2_UD_SEND_WQE_BYTE_40_PORTN_S,
351 roce_set_field(ud_sq_wqe->byte_48,
352 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
353 V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
354 hns_get_gid_index(hr_dev, qp->phy_port,
357 memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
360 set_extend_sge(qp, wr, &sge_ind);
362 } else if (ibqp->qp_type == IB_QPT_RC) {
364 memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
365 for (i = 0; i < wr->num_sge; i++)
366 tmp_len += wr->sg_list[i].length;
369 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
371 switch (wr->opcode) {
372 case IB_WR_SEND_WITH_IMM:
373 case IB_WR_RDMA_WRITE_WITH_IMM:
374 rc_sq_wqe->immtdata =
375 cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
377 case IB_WR_SEND_WITH_INV:
379 cpu_to_le32(wr->ex.invalidate_rkey);
382 rc_sq_wqe->immtdata = 0;
386 roce_set_bit(rc_sq_wqe->byte_4,
387 V2_RC_SEND_WQE_BYTE_4_FENCE_S,
388 (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
390 roce_set_bit(rc_sq_wqe->byte_4,
391 V2_RC_SEND_WQE_BYTE_4_SE_S,
392 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
394 roce_set_bit(rc_sq_wqe->byte_4,
395 V2_RC_SEND_WQE_BYTE_4_CQE_S,
396 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
398 roce_set_bit(rc_sq_wqe->byte_4,
399 V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
401 switch (wr->opcode) {
402 case IB_WR_RDMA_READ:
403 roce_set_field(rc_sq_wqe->byte_4,
404 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
405 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
406 HNS_ROCE_V2_WQE_OP_RDMA_READ);
408 cpu_to_le32(rdma_wr(wr)->rkey);
410 cpu_to_le64(rdma_wr(wr)->remote_addr);
412 case IB_WR_RDMA_WRITE:
413 roce_set_field(rc_sq_wqe->byte_4,
414 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
415 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
416 HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
418 cpu_to_le32(rdma_wr(wr)->rkey);
420 cpu_to_le64(rdma_wr(wr)->remote_addr);
422 case IB_WR_RDMA_WRITE_WITH_IMM:
423 roce_set_field(rc_sq_wqe->byte_4,
424 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
425 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
426 HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
428 cpu_to_le32(rdma_wr(wr)->rkey);
430 cpu_to_le64(rdma_wr(wr)->remote_addr);
433 roce_set_field(rc_sq_wqe->byte_4,
434 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
435 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
436 HNS_ROCE_V2_WQE_OP_SEND);
438 case IB_WR_SEND_WITH_INV:
439 roce_set_field(rc_sq_wqe->byte_4,
440 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
441 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
442 HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
444 case IB_WR_SEND_WITH_IMM:
445 roce_set_field(rc_sq_wqe->byte_4,
446 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
447 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
448 HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
450 case IB_WR_LOCAL_INV:
451 roce_set_field(rc_sq_wqe->byte_4,
452 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
453 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
454 HNS_ROCE_V2_WQE_OP_LOCAL_INV);
456 case IB_WR_ATOMIC_CMP_AND_SWP:
457 roce_set_field(rc_sq_wqe->byte_4,
458 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
459 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
460 HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
462 case IB_WR_ATOMIC_FETCH_AND_ADD:
463 roce_set_field(rc_sq_wqe->byte_4,
464 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
465 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
466 HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
468 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
469 roce_set_field(rc_sq_wqe->byte_4,
470 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
471 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
472 HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
474 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
475 roce_set_field(rc_sq_wqe->byte_4,
476 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
477 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
478 HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
481 roce_set_field(rc_sq_wqe->byte_4,
482 V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
483 V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
484 HNS_ROCE_V2_WQE_OP_MASK);
488 wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
490 ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
496 dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
497 spin_unlock_irqrestore(&qp->sq.lock, flags);
512 roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
513 V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
514 roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
515 V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
516 roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
517 V2_DB_PARAMETER_IDX_S,
518 qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
519 roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
520 V2_DB_PARAMETER_SL_S, qp->sl);
522 hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);
524 qp->sq_next_wqe = ind;
525 qp->next_sge = sge_ind;
528 spin_unlock_irqrestore(&qp->sq.lock, flags);
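/*
 * Descriptive note: hns_roce_v2_post_recv() fills receive WQEs from the
 * posted SGEs, terminates a short SGE list with HNS_ROCE_INVALID_LKEY,
 * records the buffers for RQ inline support when the device advertises
 * it, and publishes the new RQ head through the record doorbell.
 */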
533 static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
534 struct ib_recv_wr **bad_wr)
536 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
537 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
538 struct hns_roce_v2_wqe_data_seg *dseg;
539 struct hns_roce_rinl_sge *sge_list;
540 struct device *dev = hr_dev->dev;
548 spin_lock_irqsave(&hr_qp->rq.lock, flags);
549 ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
551 if (hr_qp->state == IB_QPS_RESET) {
552 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
557 for (nreq = 0; wr; ++nreq, wr = wr->next) {
558 if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
559 hr_qp->ibqp.recv_cq)) {
565 if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
566 dev_err(dev, "rq:num_sge=%d > qp->rq.max_gs=%d\n",
567 wr->num_sge, hr_qp->rq.max_gs);
573 wqe = get_recv_wqe(hr_qp, ind);
574 dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
575 for (i = 0; i < wr->num_sge; i++) {
576 if (!wr->sg_list[i].length)
578 set_data_seg_v2(dseg, wr->sg_list + i);
582 if (i < hr_qp->rq.max_gs) {
583 dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
587 /* The RQ supports inline data */
588 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
589 sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
590 hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
592 for (i = 0; i < wr->num_sge; i++) {
594 (void *)(u64)wr->sg_list[i].addr;
595 sge_list[i].len = wr->sg_list[i].length;
599 hr_qp->rq.wrid[ind] = wr->wr_id;
601 ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
606 hr_qp->rq.head += nreq;
610 *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
612 spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
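/*
 * Descriptive note: hns_roce_cmq_space() returns the number of free
 * descriptor slots in a CMQ ring. One slot is always kept unused so that
 * next_to_use == next_to_clean means "empty" rather than "full", hence
 * the trailing "- 1".
 */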
617 static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
619 int ntu = ring->next_to_use;
620 int ntc = ring->next_to_clean;
621 int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
623 return ring->desc_num - used - 1;
626 static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
627 struct hns_roce_v2_cmq_ring *ring)
629 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
631 ring->desc = kzalloc(size, GFP_KERNEL);
635 ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
637 if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
638 ring->desc_dma_addr = 0;
647 static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
648 struct hns_roce_v2_cmq_ring *ring)
650 dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
651 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
654 ring->desc_dma_addr = 0;
658 static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
660 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
661 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
662 &priv->cmq.csq : &priv->cmq.crq;
664 ring->flag = ring_type;
665 ring->next_to_clean = 0;
666 ring->next_to_use = 0;
668 return hns_roce_alloc_cmq_desc(hr_dev, ring);
671 static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
673 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
674 struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
675 &priv->cmq.csq : &priv->cmq.crq;
676 dma_addr_t dma = ring->desc_dma_addr;
678 if (ring_type == TYPE_CSQ) {
679 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
680 roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
682 roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
683 (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
684 HNS_ROCE_CMQ_ENABLE);
685 roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
686 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
688 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
689 roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
691 roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
692 (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
693 HNS_ROCE_CMQ_ENABLE);
694 roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
695 roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
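/*
 * Descriptive note: hns_roce_v2_cmq_init() brings up the command queues:
 * it sets the descriptor counts, locks and TX write-back timeout, then
 * allocates and registers the CSQ (commands sent by the driver) and the
 * CRQ (the opposite direction) with the hardware.
 */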
699 static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
701 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
704 /* Setup the queue entries for command queue */
705 priv->cmq.csq.desc_num = 1024;
706 priv->cmq.crq.desc_num = 1024;
708 /* Setup the lock for command queue */
709 spin_lock_init(&priv->cmq.csq.lock);
710 spin_lock_init(&priv->cmq.crq.lock);
712 /* Setup Tx write back timeout */
713 priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
716 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
718 dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
723 ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
725 dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
730 hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
733 hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
738 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
743 static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
745 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
747 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
748 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
751 static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
752 enum hns_roce_opcode_type opcode,
755 memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
756 desc->opcode = cpu_to_le16(opcode);
758 cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
760 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
762 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
765 static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
767 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
768 u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
770 return head == priv->cmq.csq.next_to_use;
773 static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
775 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
776 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
777 struct hns_roce_cmq_desc *desc;
778 u16 ntc = csq->next_to_clean;
782 desc = &csq->desc[ntc];
783 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
784 while (head != ntc) {
785 memset(desc, 0, sizeof(*desc));
787 if (ntc == csq->desc_num)
789 desc = &csq->desc[ntc];
792 csq->next_to_clean = ntc;
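/*
 * Descriptive note: hns_roce_cmq_send() copies 'num' descriptors into the
 * CSQ, rings the tail register and, for synchronous (NO_INTR) commands,
 * polls the head register until the firmware has consumed them or the TX
 * timeout expires. The results are copied back into the caller's
 * descriptors and the consumed entries are cleaned from the queue.
 */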
797 static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
798 struct hns_roce_cmq_desc *desc, int num)
800 struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
801 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
802 struct hns_roce_cmq_desc *desc_to_use;
803 bool complete = false;
810 if (hr_dev->is_reset)
813 spin_lock_bh(&csq->lock);
815 if (num > hns_roce_cmq_space(csq)) {
816 spin_unlock_bh(&csq->lock);
821 * Record the location of the descriptors in the CMQ for this
822 * submission; it will be used when the hardware writes back.
824 ntc = csq->next_to_use;
826 while (handle < num) {
827 desc_to_use = &csq->desc[csq->next_to_use];
828 *desc_to_use = desc[handle];
829 dev_dbg(hr_dev->dev, "set cmq desc:\n");
831 if (csq->next_to_use == csq->desc_num)
832 csq->next_to_use = 0;
836 /* Write to hardware */
837 roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
840 * If the command is sync, wait for the firmware to write back;
841 * if multiple descriptors are sent, use the first one to check.
843 if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
845 if (hns_roce_cmq_csq_done(hr_dev))
849 } while (timeout < priv->cmq.tx_timeout);
852 if (hns_roce_cmq_csq_done(hr_dev)) {
855 while (handle < num) {
856 /* get the result of hardware write back */
857 desc_to_use = &csq->desc[ntc];
858 desc[handle] = *desc_to_use;
859 dev_dbg(hr_dev->dev, "Get cmq desc:\n");
860 desc_ret = desc[handle].retval;
861 if (desc_ret == CMD_EXEC_SUCCESS)
865 priv->cmq.last_status = desc_ret;
868 if (ntc == csq->desc_num)
876 /* clean the command send queue */
877 handle = hns_roce_cmq_csq_clean(hr_dev);
879 dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
882 spin_unlock_bh(&csq->lock);
887 static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
889 struct hns_roce_query_version *resp;
890 struct hns_roce_cmq_desc desc;
893 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
894 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
898 resp = (struct hns_roce_query_version *)desc.data;
899 hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
900 hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);
905 static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
907 struct hns_roce_cfg_global_param *req;
908 struct hns_roce_cmq_desc desc;
910 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
913 req = (struct hns_roce_cfg_global_param *)desc.data;
914 memset(req, 0, sizeof(*req));
915 roce_set_field(req->time_cfg_udp_port,
916 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
917 CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
918 roce_set_field(req->time_cfg_udp_port,
919 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
920 CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
922 return hns_roce_cmq_send(hr_dev, &desc, 1);
925 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
927 struct hns_roce_cmq_desc desc[2];
928 struct hns_roce_pf_res_a *req_a;
929 struct hns_roce_pf_res_b *req_b;
933 for (i = 0; i < 2; i++) {
934 hns_roce_cmq_setup_basic_desc(&desc[i],
935 HNS_ROCE_OPC_QUERY_PF_RES, true);
938 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
940 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
943 ret = hns_roce_cmq_send(hr_dev, desc, 2);
947 req_a = (struct hns_roce_pf_res_a *)desc[0].data;
948 req_b = (struct hns_roce_pf_res_b *)desc[1].data;
950 hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
951 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
952 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
953 hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
954 PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
955 PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
956 hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
957 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
958 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
959 hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
960 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
961 PF_RES_DATA_4_PF_MPT_BT_NUM_S);
963 hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
964 PF_RES_DATA_3_PF_SL_NUM_M,
965 PF_RES_DATA_3_PF_SL_NUM_S);
970 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
972 struct hns_roce_cmq_desc desc[2];
973 struct hns_roce_vf_res_a *req_a;
974 struct hns_roce_vf_res_b *req_b;
977 req_a = (struct hns_roce_vf_res_a *)desc[0].data;
978 req_b = (struct hns_roce_vf_res_b *)desc[1].data;
979 memset(req_a, 0, sizeof(*req_a));
980 memset(req_b, 0, sizeof(*req_b));
981 for (i = 0; i < 2; i++) {
982 hns_roce_cmq_setup_basic_desc(&desc[i],
983 HNS_ROCE_OPC_ALLOC_VF_RES, false);
986 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
988 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
991 roce_set_field(req_a->vf_qpc_bt_idx_num,
992 VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
993 VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
994 roce_set_field(req_a->vf_qpc_bt_idx_num,
995 VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
996 VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
997 HNS_ROCE_VF_QPC_BT_NUM);
999 roce_set_field(req_a->vf_srqc_bt_idx_num,
1000 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
1001 VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
1002 roce_set_field(req_a->vf_srqc_bt_idx_num,
1003 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
1004 VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
1005 HNS_ROCE_VF_SRQC_BT_NUM);
1007 roce_set_field(req_a->vf_cqc_bt_idx_num,
1008 VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
1009 VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
1010 roce_set_field(req_a->vf_cqc_bt_idx_num,
1011 VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
1012 VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
1013 HNS_ROCE_VF_CQC_BT_NUM);
1015 roce_set_field(req_a->vf_mpt_bt_idx_num,
1016 VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
1017 VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
1018 roce_set_field(req_a->vf_mpt_bt_idx_num,
1019 VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
1020 VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
1021 HNS_ROCE_VF_MPT_BT_NUM);
1023 roce_set_field(req_a->vf_eqc_bt_idx_num,
1024 VF_RES_A_DATA_5_VF_EQC_IDX_M,
1025 VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
1026 roce_set_field(req_a->vf_eqc_bt_idx_num,
1027 VF_RES_A_DATA_5_VF_EQC_NUM_M,
1028 VF_RES_A_DATA_5_VF_EQC_NUM_S,
1029 HNS_ROCE_VF_EQC_NUM);
1031 roce_set_field(req_b->vf_smac_idx_num,
1032 VF_RES_B_DATA_1_VF_SMAC_IDX_M,
1033 VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
1034 roce_set_field(req_b->vf_smac_idx_num,
1035 VF_RES_B_DATA_1_VF_SMAC_NUM_M,
1036 VF_RES_B_DATA_1_VF_SMAC_NUM_S,
1037 HNS_ROCE_VF_SMAC_NUM);
1039 roce_set_field(req_b->vf_sgid_idx_num,
1040 VF_RES_B_DATA_2_VF_SGID_IDX_M,
1041 VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
1042 roce_set_field(req_b->vf_sgid_idx_num,
1043 VF_RES_B_DATA_2_VF_SGID_NUM_M,
1044 VF_RES_B_DATA_2_VF_SGID_NUM_S,
1045 HNS_ROCE_VF_SGID_NUM);
1047 roce_set_field(req_b->vf_qid_idx_sl_num,
1048 VF_RES_B_DATA_3_VF_QID_IDX_M,
1049 VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
1050 roce_set_field(req_b->vf_qid_idx_sl_num,
1051 VF_RES_B_DATA_3_VF_SL_NUM_M,
1052 VF_RES_B_DATA_3_VF_SL_NUM_S,
1053 HNS_ROCE_VF_SL_NUM);
1057 return hns_roce_cmq_send(hr_dev, desc, 2);
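/*
 * Descriptive note: hns_roce_v2_set_bt() tells the hardware how the
 * context base-address tables are organised: the BA/buffer page sizes and
 * hop numbers of the QPC, SRQC, CQC and MPT tables, taken from the
 * driver capabilities.
 */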
1060 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1062 u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
1063 u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
1064 u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
1065 u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
1066 struct hns_roce_cfg_bt_attr *req;
1067 struct hns_roce_cmq_desc desc;
1069 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1070 req = (struct hns_roce_cfg_bt_attr *)desc.data;
1071 memset(req, 0, sizeof(*req));
1073 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
1074 CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
1075 hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1076 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
1077 CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
1078 hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1079 roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
1080 CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
1081 qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
1083 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
1084 CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
1085 hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1086 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
1087 CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
1088 hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1089 roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
1090 CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
1091 srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
1093 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
1094 CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
1095 hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1096 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
1097 CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
1098 hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1099 roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
1100 CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
1101 cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
1103 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
1104 CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
1105 hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1106 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
1107 CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
1108 hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1109 roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
1110 CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
1111 mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
1113 return hns_roce_cmq_send(hr_dev, &desc, 1);
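/*
 * Descriptive note: hns_roce_v2_profile() discovers and configures the
 * device during probe: it queries the hardware version, sets the global
 * parameters, reads the PF resources, allocates the VF resources, fills
 * in the static V2 capability limits and finally programs the BT
 * attributes.
 */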
1116 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
1118 struct hns_roce_caps *caps = &hr_dev->caps;
1121 ret = hns_roce_cmq_query_hw_info(hr_dev);
1123 dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
1128 ret = hns_roce_config_global_param(hr_dev);
1130 dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
1135 /* Get the resources owned by each PF */
1136 ret = hns_roce_query_pf_resource(hr_dev);
1138 dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
1143 ret = hns_roce_alloc_vf_resource(hr_dev);
1145 dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
1150 hr_dev->vendor_part_id = 0;
1151 hr_dev->sys_image_guid = 0;
1153 caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
1154 caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
1155 caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
1156 caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
1157 caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1158 caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1159 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
1160 caps->num_uars = HNS_ROCE_V2_UAR_NUM;
1161 caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
1162 caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
1163 caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
1164 caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1165 caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
1166 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
1167 caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
1168 caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
1169 caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1170 caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1171 caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1172 caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1173 caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1174 caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
1175 caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1176 caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ;
1177 caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
1178 caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1179 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
1180 caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
1181 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1182 caps->reserved_lkey = 0;
1183 caps->reserved_pds = 0;
1184 caps->reserved_mrws = 1;
1185 caps->reserved_uars = 0;
1186 caps->reserved_cqs = 0;
1188 caps->qpc_ba_pg_sz = 0;
1189 caps->qpc_buf_pg_sz = 0;
1190 caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1191 caps->srqc_ba_pg_sz = 0;
1192 caps->srqc_buf_pg_sz = 0;
1193 caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
1194 caps->cqc_ba_pg_sz = 0;
1195 caps->cqc_buf_pg_sz = 0;
1196 caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1197 caps->mpt_ba_pg_sz = 0;
1198 caps->mpt_buf_pg_sz = 0;
1199 caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
1200 caps->pbl_ba_pg_sz = 0;
1201 caps->pbl_buf_pg_sz = 0;
1202 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
1203 caps->mtt_ba_pg_sz = 0;
1204 caps->mtt_buf_pg_sz = 0;
1205 caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
1206 caps->cqe_ba_pg_sz = 0;
1207 caps->cqe_buf_pg_sz = 0;
1208 caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
1209 caps->eqe_ba_pg_sz = 0;
1210 caps->eqe_buf_pg_sz = 0;
1211 caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
1212 caps->tsq_buf_pg_sz = 0;
1213 caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1215 caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
1216 HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1217 HNS_ROCE_CAP_FLAG_RQ_INLINE |
1218 HNS_ROCE_CAP_FLAG_RECORD_DB;
1219 caps->pkey_table_len[0] = 1;
1220 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
1221 caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
1222 caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
1223 caps->local_ca_ack_delay = 0;
1224 caps->max_mtu = IB_MTU_4096;
1226 ret = hns_roce_v2_set_bt(hr_dev);
1228 dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
1234 static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
1235 enum hns_roce_link_table_type type)
1237 struct hns_roce_cmq_desc desc[2];
1238 struct hns_roce_cfg_llm_a *req_a =
1239 (struct hns_roce_cfg_llm_a *)desc[0].data;
1240 struct hns_roce_cfg_llm_b *req_b =
1241 (struct hns_roce_cfg_llm_b *)desc[1].data;
1242 struct hns_roce_v2_priv *priv = hr_dev->priv;
1243 struct hns_roce_link_table *link_tbl;
1244 struct hns_roce_link_table_entry *entry;
1245 enum hns_roce_opcode_type opcode;
1250 case TSQ_LINK_TABLE:
1251 link_tbl = &priv->tsq;
1252 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
1254 case TPQ_LINK_TABLE:
1255 link_tbl = &priv->tpq;
1256 opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
1262 page_num = link_tbl->npages;
1263 entry = link_tbl->table.buf;
1264 memset(req_a, 0, sizeof(*req_a));
1265 memset(req_b, 0, sizeof(*req_b));
1267 for (i = 0; i < 2; i++) {
1268 hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
1271 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1273 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1276 req_a->base_addr_l = link_tbl->table.map & 0xffffffff;
1277 req_a->base_addr_h = (link_tbl->table.map >> 32) &
1279 roce_set_field(req_a->depth_pgsz_init_en,
1280 CFG_LLM_QUE_DEPTH_M,
1281 CFG_LLM_QUE_DEPTH_S,
1283 roce_set_field(req_a->depth_pgsz_init_en,
1287 req_a->head_ba_l = entry[0].blk_ba0;
1288 req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr;
1289 roce_set_field(req_a->head_ptr,
1291 CFG_LLM_HEAD_PTR_S, 0);
1293 req_b->tail_ba_l = entry[page_num - 1].blk_ba0;
1294 roce_set_field(req_b->tail_ba_h,
1295 CFG_LLM_TAIL_BA_H_M,
1296 CFG_LLM_TAIL_BA_H_S,
1297 entry[page_num - 1].blk_ba1_nxt_ptr &
1298 HNS_ROCE_LINK_TABLE_BA1_M);
1299 roce_set_field(req_b->tail_ptr,
1302 (entry[page_num - 2].blk_ba1_nxt_ptr &
1303 HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
1304 HNS_ROCE_LINK_TABLE_NXT_PTR_S);
1307 roce_set_field(req_a->depth_pgsz_init_en,
1308 CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
1310 return hns_roce_cmq_send(hr_dev, desc, 2);
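/*
 * Descriptive note: hns_roce_init_link_table() allocates the TSQ or TPQ
 * link table: a DMA-coherent table of entries, each describing one
 * buf_chk_sz-sized buffer and chained to the next entry through the
 * next-pointer field, and then hands the table to the hardware via
 * hns_roce_config_link_table().
 */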
1313 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
1314 enum hns_roce_link_table_type type)
1316 struct hns_roce_v2_priv *priv = hr_dev->priv;
1317 struct hns_roce_link_table *link_tbl;
1318 struct hns_roce_link_table_entry *entry;
1319 struct device *dev = hr_dev->dev;
1330 case TSQ_LINK_TABLE:
1331 link_tbl = &priv->tsq;
1332 buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
1333 pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
1334 pg_num_b = hr_dev->caps.sl_num * 4 + 2;
1336 case TPQ_LINK_TABLE:
1337 link_tbl = &priv->tpq;
1338 buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
1339 pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
1340 pg_num_b = 2 * 4 * func_num + 2;
1346 pg_num = max(pg_num_a, pg_num_b);
1347 size = pg_num * sizeof(struct hns_roce_link_table_entry);
1349 link_tbl->table.buf = dma_alloc_coherent(dev, size,
1350 &link_tbl->table.map,
1352 if (!link_tbl->table.buf)
1355 link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
1357 if (!link_tbl->pg_list)
1358 goto err_kcalloc_failed;
1360 entry = link_tbl->table.buf;
1361 for (i = 0; i < pg_num; ++i) {
1362 link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
1364 if (!link_tbl->pg_list[i].buf)
1365 goto err_alloc_buf_failed;
1367 link_tbl->pg_list[i].map = t;
1368 memset(link_tbl->pg_list[i].buf, 0, buf_chk_sz);
1370 entry[i].blk_ba0 = (t >> 12) & 0xffffffff;
1371 roce_set_field(entry[i].blk_ba1_nxt_ptr,
1372 HNS_ROCE_LINK_TABLE_BA1_M,
1373 HNS_ROCE_LINK_TABLE_BA1_S,
1376 if (i < (pg_num - 1))
1377 roce_set_field(entry[i].blk_ba1_nxt_ptr,
1378 HNS_ROCE_LINK_TABLE_NXT_PTR_M,
1379 HNS_ROCE_LINK_TABLE_NXT_PTR_S,
1382 link_tbl->npages = pg_num;
1383 link_tbl->pg_sz = buf_chk_sz;
1385 return hns_roce_config_link_table(hr_dev, type);
1387 err_alloc_buf_failed:
1388 for (i -= 1; i >= 0; i--)
1389 dma_free_coherent(dev, buf_chk_sz,
1390 link_tbl->pg_list[i].buf,
1391 link_tbl->pg_list[i].map);
1392 kfree(link_tbl->pg_list);
1395 dma_free_coherent(dev, size, link_tbl->table.buf,
1396 link_tbl->table.map);
1402 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
1403 struct hns_roce_link_table *link_tbl)
1405 struct device *dev = hr_dev->dev;
1409 size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
1411 for (i = 0; i < link_tbl->npages; ++i)
1412 if (link_tbl->pg_list[i].buf)
1413 dma_free_coherent(dev, link_tbl->pg_sz,
1414 link_tbl->pg_list[i].buf,
1415 link_tbl->pg_list[i].map);
1416 kfree(link_tbl->pg_list);
1418 dma_free_coherent(dev, size, link_tbl->table.buf,
1419 link_tbl->table.map);
1422 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
1424 struct hns_roce_v2_priv *priv = hr_dev->priv;
1427 /* TSQ includes SQ doorbell and ack doorbell */
1428 ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
1430 dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
1434 ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
1436 dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
1437 goto err_tpq_init_failed;
1442 err_tpq_init_failed:
1443 hns_roce_free_link_table(hr_dev, &priv->tsq);
1448 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
1450 struct hns_roce_v2_priv *priv = hr_dev->priv;
1452 hns_roce_free_link_table(hr_dev, &priv->tpq);
1453 hns_roce_free_link_table(hr_dev, &priv->tsq);
1456 static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
1458 u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
1460 return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
1463 static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
1465 u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
1467 return status & HNS_ROCE_HW_MB_STATUS_MASK;
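/*
 * Descriptive note: hns_roce_v2_post_mbox() issues a mailbox command
 * through the VF mailbox registers: it waits for the hardware run bit to
 * clear, writes the input/output parameters and finally the command
 * words (tag, cmd, event, token), which kicks off execution.
 */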
1470 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
1471 u64 out_param, u32 in_modifier, u8 op_modifier,
1472 u16 op, u16 token, int event)
1474 struct device *dev = hr_dev->dev;
1475 u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base +
1476 ROCEE_VF_MB_CFG0_REG);
1481 end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
1482 while (hns_roce_v2_cmd_pending(hr_dev)) {
1483 if (time_after(jiffies, end)) {
1484 dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
1491 roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
1492 HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
1493 roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
1494 HNS_ROCE_VF_MB4_CMD_SHIFT, op);
1495 roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
1496 HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
1497 roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
1498 HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);
1500 writeq(in_param, hcr + 0);
1501 writeq(out_param, hcr + 2);
1503 /* Memory barrier */
1506 writel(val0, hcr + 4);
1507 writel(val1, hcr + 5);
1514 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
1515 unsigned long timeout)
1517 struct device *dev = hr_dev->dev;
1518 unsigned long end = 0;
1521 end = msecs_to_jiffies(timeout) + jiffies;
1522 while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
1525 if (hns_roce_v2_cmd_pending(hr_dev)) {
1526 dev_err(dev, "[cmd_poll] hw run cmd timed out!\n");
1530 status = hns_roce_v2_cmd_complete(hr_dev);
1531 if (status != 0x1) {
1532 dev_err(dev, "mailbox status 0x%x!\n", status);
1539 static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
1540 int gid_index, const union ib_gid *gid,
1541 enum hns_roce_sgid_type sgid_type)
1543 struct hns_roce_cmq_desc desc;
1544 struct hns_roce_cfg_sgid_tb *sgid_tb =
1545 (struct hns_roce_cfg_sgid_tb *)desc.data;
1548 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
1550 roce_set_field(sgid_tb->table_idx_rsv,
1551 CFG_SGID_TB_TABLE_IDX_M,
1552 CFG_SGID_TB_TABLE_IDX_S, gid_index);
1553 roce_set_field(sgid_tb->vf_sgid_type_rsv,
1554 CFG_SGID_TB_VF_SGID_TYPE_M,
1555 CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
1557 p = (u32 *)&gid->raw[0];
1558 sgid_tb->vf_sgid_l = cpu_to_le32(*p);
1560 p = (u32 *)&gid->raw[4];
1561 sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
1563 p = (u32 *)&gid->raw[8];
1564 sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
1566 p = (u32 *)&gid->raw[0xc];
1567 sgid_tb->vf_sgid_h = cpu_to_le32(*p);
1569 return hns_roce_cmq_send(hr_dev, &desc, 1);
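/*
 * Descriptive note: hns_roce_v2_set_gid() maps the GID attribute to the
 * RoCE v1 / RoCE v2 (IPv4 or IPv6) SGID type expected by the hardware and
 * writes the entry into the SGID table through the CMQ.
 */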
1572 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
1573 int gid_index, const union ib_gid *gid,
1574 const struct ib_gid_attr *attr)
1576 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
1582 if (attr->gid_type == IB_GID_TYPE_ROCE)
1583 sgid_type = GID_TYPE_FLAG_ROCE_V1;
1585 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
1586 if (ipv6_addr_v4mapped((void *)gid))
1587 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
1589 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
1592 ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
1594 dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
1599 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
1602 struct hns_roce_cmq_desc desc;
1603 struct hns_roce_cfg_smac_tb *smac_tb =
1604 (struct hns_roce_cfg_smac_tb *)desc.data;
1608 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
1610 reg_smac_l = *(u32 *)(&addr[0]);
1611 reg_smac_h = *(u16 *)(&addr[4]);
1613 memset(smac_tb, 0, sizeof(*smac_tb));
1614 roce_set_field(smac_tb->tb_idx_rsv,
1616 CFG_SMAC_TB_IDX_S, phy_port);
1617 roce_set_field(smac_tb->vf_smac_h_rsv,
1618 CFG_SMAC_TB_VF_SMAC_H_M,
1619 CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
1620 smac_tb->vf_smac_l = reg_smac_l;
1622 return hns_roce_cmq_send(hr_dev, &desc, 1);
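/*
 * Descriptive note: hns_roce_v2_write_mtpt() builds the MPT entry for a
 * memory region: state, PBL hop number and page sizes, PD, access flags,
 * length, key and IOVA. For a normal (non-DMA) MR the PBL base address
 * and the first two page addresses of the umem are recorded as well.
 */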
1625 static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1626 unsigned long mtpt_idx)
1628 struct hns_roce_v2_mpt_entry *mpt_entry;
1629 struct scatterlist *sg;
1637 memset(mpt_entry, 0, sizeof(*mpt_entry));
1639 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
1640 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
1641 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
1642 V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
1643 HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
1644 roce_set_field(mpt_entry->byte_4_pd_hop_st,
1645 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
1646 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
1647 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
1648 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1649 V2_MPT_BYTE_4_PD_S, mr->pd);
1650 mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
1652 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
1653 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
1654 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
1655 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
1656 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
1657 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
1658 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
1659 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
1660 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
1661 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1662 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1663 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1664 mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
1666 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
1667 mr->type == MR_TYPE_MR ? 0 : 1);
1668 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
1670 mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
1672 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
1673 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
1674 mpt_entry->lkey = cpu_to_le32(mr->key);
1675 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
1676 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
1678 if (mr->type == MR_TYPE_DMA)
1681 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1683 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1684 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
1685 V2_MPT_BYTE_48_PBL_BA_H_S,
1686 upper_32_bits(mr->pbl_ba >> 3));
1687 mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
1689 pages = (u64 *)__get_free_page(GFP_KERNEL);
1694 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
1695 len = sg_dma_len(sg) >> PAGE_SHIFT;
1696 for (j = 0; j < len; ++j) {
1697 page_addr = sg_dma_address(sg) +
1698 (j << mr->umem->page_shift);
1699 pages[i] = page_addr >> 6;
1701 /* Record the first 2 entries directly in the MTPT table */
1702 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
1709 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
1710 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
1711 V2_MPT_BYTE_56_PA0_H_S,
1712 upper_32_bits(pages[0]));
1713 mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
1715 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
1716 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
1717 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
1719 free_page((unsigned long)pages);
1721 roce_set_field(mpt_entry->byte_64_buf_pa1,
1722 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
1723 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
1724 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
1725 mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
1730 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
1731 struct hns_roce_mr *mr, int flags,
1732 u32 pdn, int mr_access_flags, u64 iova,
1733 u64 size, void *mb_buf)
1735 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
1737 if (flags & IB_MR_REREG_PD) {
1738 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1739 V2_MPT_BYTE_4_PD_S, pdn);
1743 if (flags & IB_MR_REREG_ACCESS) {
1744 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
1745 V2_MPT_BYTE_8_BIND_EN_S,
1746 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
1747 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
1748 V2_MPT_BYTE_8_ATOMIC_EN_S,
1749 (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
1750 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
1751 (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
1752 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
1753 (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1754 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1755 (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1758 if (flags & IB_MR_REREG_TRANS) {
1759 mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
1760 mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
1761 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
1762 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
1764 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1765 mpt_entry->pbl_ba_l =
1766 cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1767 roce_set_field(mpt_entry->byte_48_mode_ba,
1768 V2_MPT_BYTE_48_PBL_BA_H_M,
1769 V2_MPT_BYTE_48_PBL_BA_H_S,
1770 upper_32_bits(mr->pbl_ba >> 3));
1771 mpt_entry->byte_48_mode_ba =
1772 cpu_to_le32(mpt_entry->byte_48_mode_ba);
1781 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
1783 return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
1784 n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
1787 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
1789 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
1791 /* Return the CQE only when its owner bit is the inverse of the MSB of cons_idx */
1792 return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
1793 !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
1796 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
1798 return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
1801 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
1803 *hr_cq->set_ci_db = cons_index & 0xffffff;
1806 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1807 struct hns_roce_srq *srq)
1809 struct hns_roce_v2_cqe *cqe, *dest;
1814 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
1816 if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
1821 * Now sweep backwards through the CQ, removing CQ entries
1822 * that match our QP by overwriting them with later entries.
1824 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
1825 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
1826 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
1827 V2_CQE_BYTE_16_LCL_QPN_S) &
1828 HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
1829 /* SRQ is not supported by this engine */
1831 } else if (nfreed) {
1832 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
1834 owner_bit = roce_get_bit(dest->byte_4,
1835 V2_CQE_BYTE_4_OWNER_S);
1836 memcpy(dest, cqe, sizeof(*cqe));
1837 roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
1843 hr_cq->cons_index += nfreed;
1845 * Make sure update of buffer contents is done before
1846 * updating consumer index.
1849 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
1853 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1854 struct hns_roce_srq *srq)
1856 spin_lock_irq(&hr_cq->lock);
1857 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
1858 spin_unlock_irq(&hr_cq->lock);
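/*
 * Descriptive note: hns_roce_v2_write_cqc() fills the CQ context in the
 * mailbox buffer: CQ state, shift (log2 of the depth), CEQ vector, the
 * current/next CQE block addresses and page sizes, the base address, the
 * record doorbell address and the default moderation settings.
 */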
1861 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
1862 struct hns_roce_cq *hr_cq, void *mb_buf,
1863 u64 *mtts, dma_addr_t dma_handle, int nent,
1866 struct hns_roce_v2_cq_context *cq_context;
1868 cq_context = mb_buf;
1869 memset(cq_context, 0, sizeof(*cq_context));
1871 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
1872 V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
1873 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
1874 V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
1875 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
1876 V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
1877 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
1878 V2_CQC_BYTE_4_CEQN_S, vector);
1879 cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);
1881 roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
1882 V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
1884 cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
1885 cq_context->cqe_cur_blk_addr =
1886 cpu_to_le32(cq_context->cqe_cur_blk_addr);
1888 roce_set_field(cq_context->byte_16_hop_addr,
1889 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
1890 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
1891 cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
1892 roce_set_field(cq_context->byte_16_hop_addr,
1893 V2_CQC_BYTE_16_CQE_HOP_NUM_M,
1894 V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
1895 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
1897 cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
1898 roce_set_field(cq_context->byte_24_pgsz_addr,
1899 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
1900 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
1901 cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
1902 roce_set_field(cq_context->byte_24_pgsz_addr,
1903 V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
1904 V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
1905 hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
1906 roce_set_field(cq_context->byte_24_pgsz_addr,
1907 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
1908 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
1909 hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
1911 cq_context->cqe_ba = (u32)(dma_handle >> 3);
1913 roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
1914 V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
1917 roce_set_bit(cq_context->byte_44_db_record,
1918 V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
1920 roce_set_field(cq_context->byte_44_db_record,
1921 V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
1922 V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
1923 ((u32)hr_cq->db.dma) >> 1);
1924 cq_context->db_record_addr = hr_cq->db.dma >> 32;
1926 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
1927 V2_CQC_BYTE_56_CQ_MAX_CNT_M,
1928 V2_CQC_BYTE_56_CQ_MAX_CNT_S,
1929 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
1930 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
1931 V2_CQC_BYTE_56_CQ_PERIOD_M,
1932 V2_CQC_BYTE_56_CQ_PERIOD_S,
1933 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
1936 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
1937 enum ib_cq_notify_flags flags)
1939 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
1940 u32 notification_flag;
1946 notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
1947 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
1949 * flags = 0; Notification Flag = 1, next
1950 * flags = 1; Notification Flag = 0, solicited
1952 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
1954 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
1955 HNS_ROCE_V2_CQ_DB_NTR);
1956 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
1957 V2_CQ_DB_PARAMETER_CONS_IDX_S,
1958 hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
1959 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
1960 V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
1961 roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
1964 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
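/*
 * Descriptive note: hns_roce_handle_recv_inl_wqe() copies an RQ-inline
 * payload out of the receive WQE buffer into the user buffers remembered
 * at post_recv time; if the payload does not fit into the posted buffers
 * the work completion is flagged with IB_WC_LOC_LEN_ERR.
 */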
1969 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
1970 struct hns_roce_qp **cur_qp,
1973 struct hns_roce_rinl_sge *sge_list;
1974 u32 wr_num, wr_cnt, sge_num;
1975 u32 sge_cnt, data_len, size;
1978 wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
1979 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
1980 wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
1982 sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
1983 sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
1984 wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
1985 data_len = wc->byte_len;
1987 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
1988 size = min(sge_list[sge_cnt].len, data_len);
1989 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
1996 wc->status = IB_WC_LOC_LEN_ERR;
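/*
 * Descriptive note: hns_roce_v2_poll_one() consumes a single CQE: it
 * looks up the QP, translates the hardware status and opcode into ib_wc
 * fields, handles RQ inline completions and advances the SQ or RQ tail
 * pointer that the completion refers to.
 */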
2003 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
2004 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2006 struct hns_roce_dev *hr_dev;
2007 struct hns_roce_v2_cqe *cqe;
2008 struct hns_roce_qp *hr_qp;
2009 struct hns_roce_wq *wq;
2017 /* Find cqe according to consumer index */
2018 cqe = next_cqe_sw_v2(hr_cq);
2022 ++hr_cq->cons_index;
2023 /* Memory barrier */
2027 is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
2029 qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2030 V2_CQE_BYTE_16_LCL_QPN_S);
2032 if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2033 hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2034 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2035 if (unlikely(!hr_qp)) {
2036 dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
2037 hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
2043 wc->qp = &(*cur_qp)->ibqp;
2046 status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
2047 V2_CQE_BYTE_4_STATUS_S);
2048 switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
2049 case HNS_ROCE_CQE_V2_SUCCESS:
2050 wc->status = IB_WC_SUCCESS;
2052 case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
2053 wc->status = IB_WC_LOC_LEN_ERR;
2055 case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
2056 wc->status = IB_WC_LOC_QP_OP_ERR;
2058 case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
2059 wc->status = IB_WC_LOC_PROT_ERR;
2061 case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
2062 wc->status = IB_WC_WR_FLUSH_ERR;
2064 case HNS_ROCE_CQE_V2_MW_BIND_ERR:
2065 wc->status = IB_WC_MW_BIND_ERR;
2067 case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
2068 wc->status = IB_WC_BAD_RESP_ERR;
2070 case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
2071 wc->status = IB_WC_LOC_ACCESS_ERR;
2073 case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
2074 wc->status = IB_WC_REM_INV_REQ_ERR;
2076 case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
2077 wc->status = IB_WC_REM_ACCESS_ERR;
2079 case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
2080 wc->status = IB_WC_REM_OP_ERR;
2082 case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
2083 wc->status = IB_WC_RETRY_EXC_ERR;
2085 case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
2086 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2088 case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
2089 wc->status = IB_WC_REM_ABORT_ERR;
2092 wc->status = IB_WC_GENERAL_ERR;
2096 /* The CQE reported an error status; return without parsing the opcode */
2097 if (wc->status != IB_WC_SUCCESS)
2102 /* SQ corresponding to CQE */
2103 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2104 V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
2105 case HNS_ROCE_SQ_OPCODE_SEND:
2106 wc->opcode = IB_WC_SEND;
2108 case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
2109 wc->opcode = IB_WC_SEND;
2111 case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
2112 wc->opcode = IB_WC_SEND;
2113 wc->wc_flags |= IB_WC_WITH_IMM;
2115 case HNS_ROCE_SQ_OPCODE_RDMA_READ:
2116 wc->opcode = IB_WC_RDMA_READ;
2117 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2119 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
2120 wc->opcode = IB_WC_RDMA_WRITE;
2122 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
2123 wc->opcode = IB_WC_RDMA_WRITE;
2124 wc->wc_flags |= IB_WC_WITH_IMM;
2126 case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
2127 wc->opcode = IB_WC_LOCAL_INV;
2128 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2130 case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
2131 wc->opcode = IB_WC_COMP_SWAP;
2134 case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
2135 wc->opcode = IB_WC_FETCH_ADD;
2138 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
2139 wc->opcode = IB_WC_MASKED_COMP_SWAP;
2142 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
2143 wc->opcode = IB_WC_MASKED_FETCH_ADD;
2146 case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
2147 wc->opcode = IB_WC_REG_MR;
2149 case HNS_ROCE_SQ_OPCODE_BIND_MW:
2150 wc->opcode = IB_WC_REG_MR;
2153 wc->status = IB_WC_GENERAL_ERR;
2157 wq = &(*cur_qp)->sq;
2158 if ((*cur_qp)->sq_signal_bits) {
2160 * If sq_signal_bits is set, first advance
2161 * the tail pointer to the WQE that
2162 * the current CQE corresponds to.
2164 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2165 V2_CQE_BYTE_4_WQE_INDX_M,
2166 V2_CQE_BYTE_4_WQE_INDX_S);
2167 wq->tail += (wqe_ctr - (u16)wq->tail) &
2171 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2174 /* RQ corresponding to CQE */
2175 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2177 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2178 V2_CQE_BYTE_4_OPCODE_S);
2179 switch (opcode & 0x1f) {
2180 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
2181 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2182 wc->wc_flags = IB_WC_WITH_IMM;
2184 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2186 case HNS_ROCE_V2_OPCODE_SEND:
2187 wc->opcode = IB_WC_RECV;
2190 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
2191 wc->opcode = IB_WC_RECV;
2192 wc->wc_flags = IB_WC_WITH_IMM;
2194 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2196 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
2197 wc->opcode = IB_WC_RECV;
2198 wc->wc_flags = IB_WC_WITH_INVALIDATE;
2199 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
2202 wc->status = IB_WC_GENERAL_ERR;
2206 if ((wc->qp->qp_type == IB_QPT_RC ||
2207 wc->qp->qp_type == IB_QPT_UC) &&
2208 (opcode == HNS_ROCE_V2_OPCODE_SEND ||
2209 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
2210 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
2211 (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
2212 ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
2217 /* Update tail pointer, record wr_id */
2218 wq = &(*cur_qp)->rq;
2219 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2222 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
2223 V2_CQE_BYTE_32_SL_S);
2224 wc->src_qp = (u8)roce_get_field(cqe->byte_32,
2225 V2_CQE_BYTE_32_RMT_QPN_M,
2226 V2_CQE_BYTE_32_RMT_QPN_S);
2227 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
2228 V2_CQE_BYTE_32_GRH_S) ?
2230 wc->port_num = roce_get_field(cqe->byte_32,
2231 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
2233 memcpy(wc->smac, cqe->smac, 4);
2234 wc->smac[4] = roce_get_field(cqe->byte_28,
2235 V2_CQE_BYTE_28_SMAC_4_M,
2236 V2_CQE_BYTE_28_SMAC_4_S);
2237 wc->smac[5] = roce_get_field(cqe->byte_28,
2238 V2_CQE_BYTE_28_SMAC_5_M,
2239 V2_CQE_BYTE_28_SMAC_5_S);
2240 wc->vlan_id = 0xffff;
2241 wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
2242 wc->network_hdr_type = roce_get_field(cqe->byte_28,
2243 V2_CQE_BYTE_28_PORT_TYPE_M,
2244 V2_CQE_BYTE_28_PORT_TYPE_S);
2250 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
2253 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2254 struct hns_roce_qp *cur_qp = NULL;
2255 unsigned long flags;
2258 spin_lock_irqsave(&hr_cq->lock, flags);
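/*
 * Poll at most num_entries CQEs while holding the CQ lock; the consumer
 * index is written back to hardware only once, after the loop.
 */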
2260 for (npolled = 0; npolled < num_entries; ++npolled) {
2261 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
2266 /* Memory barrier */
2268 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2271 spin_unlock_irqrestore(&hr_cq->lock, flags);
2276 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
2277 struct hns_roce_hem_table *table, int obj,
2280 struct device *dev = hr_dev->dev;
2281 struct hns_roce_cmd_mailbox *mailbox;
2282 struct hns_roce_hem_iter iter;
2283 struct hns_roce_hem_mhop mhop;
2284 struct hns_roce_hem *hem;
2285 unsigned long mhop_obj = obj;
2295 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2298 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
2302 hop_num = mhop.hop_num;
2303 chunk_ba_num = mhop.bt_chunk_size / 8;
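/*
 * Each base-address chunk holds bt_chunk_size / 8 64-bit entries, so the
 * HEM index below is built from the per-hop indices according to the
 * table's hop count.
 */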
2306 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
2308 l1_idx = i * chunk_ba_num + j;
2309 } else if (hop_num == 1) {
2310 hem_idx = i * chunk_ba_num + j;
2311 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
2315 switch (table->type) {
2317 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
2320 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
2323 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
2326 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
2329 dev_warn(dev, "Table type %d cannot be written via mailbox!\n",
2335 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2336 if (IS_ERR(mailbox))
2337 return PTR_ERR(mailbox);
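/*
 * For the last step of the hop chain, the bus address of every HEM chunk
 * is written to hardware through its own mailbox command; for earlier
 * steps only the L0/L1 base-address entry is written.
 */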
2339 if (check_whether_last_step(hop_num, step_idx)) {
2340 hem = table->hem[hem_idx];
2341 for (hns_roce_hem_first(hem, &iter);
2342 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
2343 bt_ba = hns_roce_hem_addr(&iter);
2345 /* configure the ba, tag, and op */
2346 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
2348 HNS_ROCE_CMD_TIMEOUT_MSECS);
2352 bt_ba = table->bt_l0_dma_addr[i];
2353 else if (step_idx == 1 && hop_num == 2)
2354 bt_ba = table->bt_l1_dma_addr[l1_idx];
2356 /* configure the ba, tag, and op */
2357 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
2358 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
2361 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2365 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
2366 struct hns_roce_hem_table *table, int obj,
2369 struct device *dev = hr_dev->dev;
2370 struct hns_roce_cmd_mailbox *mailbox;
2374 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2377 switch (table->type) {
2379 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
2382 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
2385 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
2388 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
2391 dev_warn(dev, "Table type %d cannot be destroyed via mailbox!\n",
2397 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2398 if (IS_ERR(mailbox))
2399 return PTR_ERR(mailbox);
2401 /* configure the tag and op */
2402 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
2403 HNS_ROCE_CMD_TIMEOUT_MSECS);
2405 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2409 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
2410 struct hns_roce_mtt *mtt,
2411 enum ib_qp_state cur_state,
2412 enum ib_qp_state new_state,
2413 struct hns_roce_v2_qp_context *context,
2414 struct hns_roce_qp *hr_qp)
2416 struct hns_roce_cmd_mailbox *mailbox;
2419 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2420 if (IS_ERR(mailbox))
2421 return PTR_ERR(mailbox);
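/*
 * The mailbox buffer holds the QP context followed immediately by the
 * context mask, hence two contexts are copied below.
 */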
2423 memcpy(mailbox->buf, context, sizeof(*context) * 2);
2425 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2426 HNS_ROCE_CMD_MODIFY_QPC,
2427 HNS_ROCE_CMD_TIMEOUT_MSECS);
2429 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2434 static void set_access_flags(struct hns_roce_qp *hr_qp,
2435 struct hns_roce_v2_qp_context *context,
2436 struct hns_roce_v2_qp_context *qpc_mask,
2437 const struct ib_qp_attr *attr, int attr_mask)
2442 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
2443 attr->max_dest_rd_atomic : hr_qp->resp_depth;
2445 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
2446 attr->qp_access_flags : hr_qp->atomic_rd_en;
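/*
 * With a responder depth of zero the QP cannot serve RDMA reads or
 * atomics, so only the remote-write permission is kept.
 */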
2448 if (!dest_rd_atomic)
2449 access_flags &= IB_ACCESS_REMOTE_WRITE;
2451 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2452 !!(access_flags & IB_ACCESS_REMOTE_READ));
2453 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
2455 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2456 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
2457 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
2459 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2460 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
2461 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
2464 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
2465 const struct ib_qp_attr *attr,
2467 struct hns_roce_v2_qp_context *context,
2468 struct hns_roce_v2_qp_context *qpc_mask)
2470 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2471 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2474 * In the v2 engine, software passes a context and a context mask to
2475 * hardware when modifying a QP. If software needs to modify some fields
2476 * of the context, the corresponding bits in the context mask must be
2477 * cleared to 0 at the same time; all other mask bits stay 0x1.
2479 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2480 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
2481 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2482 V2_QPC_BYTE_4_TST_S, 0);
2484 if (ibqp->qp_type == IB_QPT_GSI)
2485 roce_set_field(context->byte_4_sqpn_tst,
2486 V2_QPC_BYTE_4_SGE_SHIFT_M,
2487 V2_QPC_BYTE_4_SGE_SHIFT_S,
2488 ilog2((unsigned int)hr_qp->sge.sge_cnt));
2490 roce_set_field(context->byte_4_sqpn_tst,
2491 V2_QPC_BYTE_4_SGE_SHIFT_M,
2492 V2_QPC_BYTE_4_SGE_SHIFT_S,
2493 hr_qp->sq.max_gs > 2 ?
2494 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
2496 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
2497 V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
2499 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2500 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
2501 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2502 V2_QPC_BYTE_4_SQPN_S, 0);
2504 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2505 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
2506 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2507 V2_QPC_BYTE_16_PD_S, 0);
2509 roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
2510 V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
2511 roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
2512 V2_QPC_BYTE_20_RQWS_S, 0);
2514 roce_set_field(context->byte_20_smac_sgid_idx,
2515 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
2516 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2517 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2518 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
2520 roce_set_field(context->byte_20_smac_sgid_idx,
2521 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
2522 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2523 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2524 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
2526 /* When no VLAN is in use, the VLAN index must be set to 0xFFF */
2527 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
2528 V2_QPC_BYTE_24_VLAN_IDX_S, 0xfff);
2529 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
2530 V2_QPC_BYTE_24_VLAN_IDX_S, 0);
2533 * Clear the relevant fields of the context mask for fields that should be
2534 * zero in the context: because every field of the context already defaults
2535 * to zero, the context itself does not need to be written again.
2537 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
2538 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
2539 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
2540 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
2542 roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
2543 V2_QPC_BYTE_60_MAPID_S, 0);
2545 roce_set_bit(qpc_mask->byte_60_qpst_mapid,
2546 V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
2547 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
2549 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
2551 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
2553 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
2555 roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
2557 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
2558 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
2560 if (attr_mask & IB_QP_QKEY) {
2561 context->qkey_xrcd = attr->qkey;
2562 qpc_mask->qkey_xrcd = 0;
2563 hr_qp->qkey = attr->qkey;
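/*
 * The RQ record-doorbell address is split: the low 32 bits (shifted right
 * by one) go into the BYTE_68 field, and the upper 32 bits go into
 * rq_db_record_addr.
 */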
2566 if (hr_qp->rdb_en) {
2567 roce_set_bit(context->byte_68_rq_db,
2568 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
2569 roce_set_bit(qpc_mask->byte_68_rq_db,
2570 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
2573 roce_set_field(context->byte_68_rq_db,
2574 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
2575 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
2576 ((u32)hr_qp->rdb.dma) >> 1);
2577 roce_set_field(qpc_mask->byte_68_rq_db,
2578 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
2579 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
2580 context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
2581 qpc_mask->rq_db_record_addr = 0;
2583 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
2584 (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
2585 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
2587 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2588 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
2589 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2590 V2_QPC_BYTE_80_RX_CQN_S, 0);
2592 roce_set_field(context->byte_76_srqn_op_en,
2593 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
2594 to_hr_srq(ibqp->srq)->srqn);
2595 roce_set_field(qpc_mask->byte_76_srqn_op_en,
2596 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
2597 roce_set_bit(context->byte_76_srqn_op_en,
2598 V2_QPC_BYTE_76_SRQ_EN_S, 1);
2599 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
2600 V2_QPC_BYTE_76_SRQ_EN_S, 0);
2603 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2604 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
2605 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
2606 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2607 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
2608 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
2610 roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
2611 V2_QPC_BYTE_92_SRQ_INFO_S, 0);
2613 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
2614 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
2616 roce_set_field(qpc_mask->byte_104_rq_sge,
2617 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
2618 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
2620 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2621 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
2622 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
2623 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
2624 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
2625 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2626 V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
2628 qpc_mask->rq_rnr_timer = 0;
2629 qpc_mask->rx_msg_len = 0;
2630 qpc_mask->rx_rkey_pkt_info = 0;
2631 qpc_mask->rx_va = 0;
2633 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
2634 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
2635 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
2636 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
2638 roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
2639 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
2640 V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
2641 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
2642 V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
2644 roce_set_field(qpc_mask->byte_144_raq,
2645 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
2646 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
2647 roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
2649 roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
2650 V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
2651 roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
2653 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
2654 V2_QPC_BYTE_148_RQ_MSN_S, 0);
2655 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
2656 V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
2658 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
2659 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
2660 roce_set_field(qpc_mask->byte_152_raq,
2661 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
2662 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
2664 roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
2665 V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
2667 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
2668 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
2669 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
2670 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
2671 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
2672 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
2674 roce_set_field(context->byte_168_irrl_idx,
2675 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2676 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
2677 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2678 roce_set_field(qpc_mask->byte_168_irrl_idx,
2679 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2680 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
2682 roce_set_bit(qpc_mask->byte_168_irrl_idx,
2683 V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
2684 roce_set_bit(qpc_mask->byte_168_irrl_idx,
2685 V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
2686 roce_set_field(qpc_mask->byte_168_irrl_idx,
2687 V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
2688 V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
2690 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
2691 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
2692 roce_set_field(qpc_mask->byte_172_sq_psn,
2693 V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
2694 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
2696 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
2699 roce_set_field(qpc_mask->byte_176_msg_pktn,
2700 V2_QPC_BYTE_176_MSG_USE_PKTN_M,
2701 V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
2702 roce_set_field(qpc_mask->byte_176_msg_pktn,
2703 V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
2704 V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
2706 roce_set_field(qpc_mask->byte_184_irrl_idx,
2707 V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
2708 V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
2710 qpc_mask->cur_sge_offset = 0;
2712 roce_set_field(qpc_mask->byte_192_ext_sge,
2713 V2_QPC_BYTE_192_CUR_SGE_IDX_M,
2714 V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
2715 roce_set_field(qpc_mask->byte_192_ext_sge,
2716 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
2717 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
2719 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
2720 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
2722 roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
2723 V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
2724 roce_set_field(qpc_mask->byte_200_sq_max,
2725 V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
2726 V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
2728 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
2729 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
2731 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
2732 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
2734 qpc_mask->sq_timer = 0;
2736 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
2737 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
2738 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
2739 roce_set_field(qpc_mask->byte_232_irrl_sge,
2740 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
2741 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
2743 qpc_mask->irrl_cur_sge_offset = 0;
2745 roce_set_field(qpc_mask->byte_240_irrl_tail,
2746 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
2747 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
2748 roce_set_field(qpc_mask->byte_240_irrl_tail,
2749 V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
2750 V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
2751 roce_set_field(qpc_mask->byte_240_irrl_tail,
2752 V2_QPC_BYTE_240_RX_ACK_MSN_M,
2753 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
2755 roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
2756 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
2757 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
2759 roce_set_field(qpc_mask->byte_248_ack_psn,
2760 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
2761 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
2762 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
2764 roce_set_bit(qpc_mask->byte_248_ack_psn,
2765 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
2766 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
2769 hr_qp->access_flags = attr->qp_access_flags;
2770 hr_qp->pkey_index = attr->pkey_index;
2771 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2772 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
2773 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2774 V2_QPC_BYTE_252_TX_CQN_S, 0);
2776 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
2777 V2_QPC_BYTE_252_ERR_TYPE_S, 0);
2779 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
2780 V2_QPC_BYTE_256_RQ_CQE_IDX_M,
2781 V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
2782 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
2783 V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
2784 V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
2787 static void modify_qp_init_to_init(struct ib_qp *ibqp,
2788 const struct ib_qp_attr *attr, int attr_mask,
2789 struct hns_roce_v2_qp_context *context,
2790 struct hns_roce_v2_qp_context *qpc_mask)
2792 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2795 * In the v2 engine, software passes a context and a context mask to
2796 * hardware when modifying a QP. If software needs to modify some fields
2797 * of the context, the corresponding bits in the context mask must be
2798 * cleared to 0 at the same time; all other mask bits stay 0x1.
2800 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2801 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
2802 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2803 V2_QPC_BYTE_4_TST_S, 0);
2805 if (ibqp->qp_type == IB_QPT_GSI)
2806 roce_set_field(context->byte_4_sqpn_tst,
2807 V2_QPC_BYTE_4_SGE_SHIFT_M,
2808 V2_QPC_BYTE_4_SGE_SHIFT_S,
2809 ilog2((unsigned int)hr_qp->sge.sge_cnt));
2811 roce_set_field(context->byte_4_sqpn_tst,
2812 V2_QPC_BYTE_4_SGE_SHIFT_M,
2813 V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
2814 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
2816 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
2817 V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
2819 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2820 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2821 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2822 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2825 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2826 !!(attr->qp_access_flags &
2827 IB_ACCESS_REMOTE_WRITE));
2828 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2831 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2832 !!(attr->qp_access_flags &
2833 IB_ACCESS_REMOTE_ATOMIC));
2834 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2837 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2838 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
2839 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2842 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2843 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
2844 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2847 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2848 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
2849 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2853 roce_set_field(context->byte_20_smac_sgid_idx,
2854 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
2855 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2856 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2857 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
2859 roce_set_field(context->byte_20_smac_sgid_idx,
2860 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
2861 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2862 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2863 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
2865 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2866 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
2867 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2868 V2_QPC_BYTE_16_PD_S, 0);
2870 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2871 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
2872 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2873 V2_QPC_BYTE_80_RX_CQN_S, 0);
2875 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2876 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
2877 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2878 V2_QPC_BYTE_252_TX_CQN_S, 0);
2881 roce_set_bit(context->byte_76_srqn_op_en,
2882 V2_QPC_BYTE_76_SRQ_EN_S, 1);
2883 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
2884 V2_QPC_BYTE_76_SRQ_EN_S, 0);
2885 roce_set_field(context->byte_76_srqn_op_en,
2886 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
2887 to_hr_srq(ibqp->srq)->srqn);
2888 roce_set_field(qpc_mask->byte_76_srqn_op_en,
2889 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
2892 if (attr_mask & IB_QP_QKEY) {
2893 context->qkey_xrcd = attr->qkey;
2894 qpc_mask->qkey_xrcd = 0;
2897 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2898 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
2899 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2900 V2_QPC_BYTE_4_SQPN_S, 0);
2902 if (attr_mask & IB_QP_DEST_QPN) {
2903 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
2904 V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
2905 roce_set_field(qpc_mask->byte_56_dqpn_err,
2906 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
2908 roce_set_field(context->byte_168_irrl_idx,
2909 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2910 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
2911 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2912 roce_set_field(qpc_mask->byte_168_irrl_idx,
2913 V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
2914 V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
2917 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
2918 const struct ib_qp_attr *attr, int attr_mask,
2919 struct hns_roce_v2_qp_context *context,
2920 struct hns_roce_v2_qp_context *qpc_mask)
2922 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2923 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2924 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2925 struct device *dev = hr_dev->dev;
2926 dma_addr_t dma_handle_3;
2927 dma_addr_t dma_handle_2;
2928 dma_addr_t dma_handle;
2938 /* Search qp buf's mtts */
2939 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2940 hr_qp->mtt.first_seg, &dma_handle);
2942 dev_err(dev, "failed to find mtt for qp buf\n");
2946 /* Search IRRL's mtts */
2947 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2948 hr_qp->qpn, &dma_handle_2);
2950 dev_err(dev, "failed to find qp irrl_table entry\n");
2954 /* Search TRRL's mtts */
2955 mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
2956 hr_qp->qpn, &dma_handle_3);
2958 dev_err(dev, "failed to find qp trrl_table entry\n");
2962 if (attr_mask & IB_QP_ALT_PATH) {
2963 dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
2967 dmac = (u8 *)attr->ah_attr.roce.dmac;
2968 context->wqe_sge_ba = (u32)(dma_handle >> 3);
2969 qpc_mask->wqe_sge_ba = 0;
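/*
 * The WQE/SGE buffer base address is programmed right-shifted by 3 bits;
 * its upper part is written separately into the BYTE_12 field below.
 */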
2972 * In the v2 engine, software passes a context and a context mask to
2973 * hardware when modifying a QP. If software needs to modify some fields
2974 * of the context, the corresponding bits in the context mask must be
2975 * cleared to 0 at the same time; all other mask bits stay 0x1.
2977 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
2978 V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
2979 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
2980 V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
2982 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
2983 V2_QPC_BYTE_12_SQ_HOP_NUM_S,
2984 hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
2985 0 : hr_dev->caps.mtt_hop_num);
2986 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
2987 V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
2989 roce_set_field(context->byte_20_smac_sgid_idx,
2990 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
2991 V2_QPC_BYTE_20_SGE_HOP_NUM_S,
2992 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
2993 hr_dev->caps.mtt_hop_num : 0);
2994 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2995 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
2996 V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
2998 roce_set_field(context->byte_20_smac_sgid_idx,
2999 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3000 V2_QPC_BYTE_20_RQ_HOP_NUM_S,
3001 hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
3002 0 : hr_dev->caps.mtt_hop_num);
3003 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3004 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3005 V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
3007 roce_set_field(context->byte_16_buf_ba_pg_sz,
3008 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3009 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
3010 hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET);
3011 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3012 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3013 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
3015 roce_set_field(context->byte_16_buf_ba_pg_sz,
3016 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3017 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
3018 hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
3019 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3020 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3021 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
3023 roce_set_field(context->byte_80_rnr_rx_cqn,
3024 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3025 V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
3026 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
3027 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3028 V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
3030 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3031 context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
3032 >> PAGE_ADDR_SHIFT);
3033 qpc_mask->rq_cur_blk_addr = 0;
3035 roce_set_field(context->byte_92_srq_info,
3036 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3037 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
3038 mtts[hr_qp->rq.offset / page_size]
3039 >> (32 + PAGE_ADDR_SHIFT));
3040 roce_set_field(qpc_mask->byte_92_srq_info,
3041 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3042 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
3044 context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
3045 >> PAGE_ADDR_SHIFT);
3046 qpc_mask->rq_nxt_blk_addr = 0;
3048 roce_set_field(context->byte_104_rq_sge,
3049 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3050 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
3051 mtts[hr_qp->rq.offset / page_size + 1]
3052 >> (32 + PAGE_ADDR_SHIFT));
3053 roce_set_field(qpc_mask->byte_104_rq_sge,
3054 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3055 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
3057 roce_set_field(context->byte_108_rx_reqepsn,
3058 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3059 V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
3060 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3061 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3062 V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
3064 roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3065 V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
3066 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3067 V2_QPC_BYTE_132_TRRL_BA_S, 0);
3068 context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
3069 qpc_mask->trrl_ba = 0;
3070 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3071 V2_QPC_BYTE_140_TRRL_BA_S,
3072 (u32)(dma_handle_3 >> (32 + 16 + 4)));
3073 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3074 V2_QPC_BYTE_140_TRRL_BA_S, 0);
3076 context->irrl_ba = (u32)(dma_handle_2 >> 6);
3077 qpc_mask->irrl_ba = 0;
3078 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3079 V2_QPC_BYTE_208_IRRL_BA_S,
3080 dma_handle_2 >> (32 + 6));
3081 roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3082 V2_QPC_BYTE_208_IRRL_BA_S, 0);
3084 roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
3085 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
3087 roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3088 hr_qp->sq_signal_bits);
3089 roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3092 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
3094 smac = (u8 *)hr_dev->dev_addr[port];
3095 /* when the dmac equals the smac or loop_idc is 1, enable loopback */
3096 if (ether_addr_equal_unaligned(dmac, smac) ||
3097 hr_dev->loop_idc == 0x1) {
3098 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
3099 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
3102 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
3103 attr->max_dest_rd_atomic) {
3104 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
3105 V2_QPC_BYTE_140_RR_MAX_S,
3106 fls(attr->max_dest_rd_atomic - 1));
3107 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
3108 V2_QPC_BYTE_140_RR_MAX_S, 0);
3111 if (attr_mask & IB_QP_DEST_QPN) {
3112 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3113 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
3114 roce_set_field(qpc_mask->byte_56_dqpn_err,
3115 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3118 /* Configure GID index */
3119 port_num = rdma_ah_get_port_num(&attr->ah_attr);
3120 roce_set_field(context->byte_20_smac_sgid_idx,
3121 V2_QPC_BYTE_20_SGID_IDX_M,
3122 V2_QPC_BYTE_20_SGID_IDX_S,
3123 hns_get_gid_index(hr_dev, port_num - 1,
3125 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3126 V2_QPC_BYTE_20_SGID_IDX_M,
3127 V2_QPC_BYTE_20_SGID_IDX_S, 0);
3128 memcpy(&(context->dmac), dmac, 4);
3129 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3130 V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
3132 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3133 V2_QPC_BYTE_52_DMAC_S, 0);
3135 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3136 V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
3137 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3138 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3140 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
3141 V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
3142 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
3143 V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
3145 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
3146 V2_QPC_BYTE_28_FL_S, grh->flow_label);
3147 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
3148 V2_QPC_BYTE_28_FL_S, 0);
3150 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
3151 V2_QPC_BYTE_24_TC_S, grh->traffic_class);
3152 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
3153 V2_QPC_BYTE_24_TC_S, 0);
3155 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
3156 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3157 V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
3158 else if (attr_mask & IB_QP_PATH_MTU)
3159 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3160 V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
3162 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3163 V2_QPC_BYTE_24_MTU_S, 0);
3165 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
3166 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
3168 roce_set_field(context->byte_84_rq_ci_pi,
3169 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3170 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
3171 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3172 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3173 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3175 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3176 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3177 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3178 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3179 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3180 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3181 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3182 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3183 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3184 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3186 context->rq_rnr_timer = 0;
3187 qpc_mask->rq_rnr_timer = 0;
3189 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3190 V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
3191 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3192 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3194 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3195 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3196 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3197 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3199 roce_set_field(context->byte_168_irrl_idx,
3200 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3201 V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
3202 roce_set_field(qpc_mask->byte_168_irrl_idx,
3203 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3204 V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
3206 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3207 V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
3208 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3209 V2_QPC_BYTE_28_SL_S, 0);
3210 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3215 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
3216 const struct ib_qp_attr *attr, int attr_mask,
3217 struct hns_roce_v2_qp_context *context,
3218 struct hns_roce_v2_qp_context *qpc_mask)
3220 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3221 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3222 struct device *dev = hr_dev->dev;
3223 dma_addr_t dma_handle;
3227 /* Search qp buf's mtts */
3228 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
3229 hr_qp->mtt.first_seg, &dma_handle);
3231 dev_err(dev, "failed to find mtt for qp buf\n");
3235 /* Alternate path and path migration are not supported */
3236 if ((attr_mask & IB_QP_ALT_PATH) ||
3237 (attr_mask & IB_QP_PATH_MIG_STATE)) {
3238 dev_err(dev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
3243 * In the v2 engine, software passes a context and a context mask to
3244 * hardware when modifying a QP. If software needs to modify some fields
3245 * of the context, the corresponding bits in the context mask must be
3246 * cleared to 0 at the same time; all other mask bits stay 0x1.
3248 roce_set_field(context->byte_60_qpst_mapid,
3249 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
3250 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
3251 roce_set_field(qpc_mask->byte_60_qpst_mapid,
3252 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
3253 V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);
3255 context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
3256 roce_set_field(context->byte_168_irrl_idx,
3257 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3258 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
3259 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3260 qpc_mask->sq_cur_blk_addr = 0;
3261 roce_set_field(qpc_mask->byte_168_irrl_idx,
3262 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3263 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
3265 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
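/*
 * The current SGE block address only applies when the extended SGE area
 * is in use (GSI QPs, or more than two SGEs per WQE); otherwise it is
 * programmed as zero.
 */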
3266 context->sq_cur_sge_blk_addr =
3267 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3268 ((u32)(mtts[hr_qp->sge.offset / page_size]
3269 >> PAGE_ADDR_SHIFT)) : 0;
3270 roce_set_field(context->byte_184_irrl_idx,
3271 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3272 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
3273 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3274 (mtts[hr_qp->sge.offset / page_size] >>
3275 (32 + PAGE_ADDR_SHIFT)) : 0);
3276 qpc_mask->sq_cur_sge_blk_addr = 0;
3277 roce_set_field(qpc_mask->byte_184_irrl_idx,
3278 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3279 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
3281 context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
3282 roce_set_field(context->byte_232_irrl_sge,
3283 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3284 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
3285 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3286 qpc_mask->rx_sq_cur_blk_addr = 0;
3287 roce_set_field(qpc_mask->byte_232_irrl_sge,
3288 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3289 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
3292 * Clear the relevant fields of the context mask for fields that should be
3293 * zero in the context: because every field of the context already defaults
3294 * to zero, the context itself does not need to be written again.
3296 roce_set_field(qpc_mask->byte_232_irrl_sge,
3297 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3298 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3300 roce_set_field(qpc_mask->byte_240_irrl_tail,
3301 V2_QPC_BYTE_240_RX_ACK_MSN_M,
3302 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3304 roce_set_field(context->byte_244_rnr_rxack,
3305 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3306 V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
3307 roce_set_field(qpc_mask->byte_244_rnr_rxack,
3308 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3309 V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
3311 roce_set_field(qpc_mask->byte_248_ack_psn,
3312 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3313 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3314 roce_set_bit(qpc_mask->byte_248_ack_psn,
3315 V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
3316 roce_set_field(qpc_mask->byte_248_ack_psn,
3317 V2_QPC_BYTE_248_IRRL_PSN_M,
3318 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3320 roce_set_field(qpc_mask->byte_240_irrl_tail,
3321 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3322 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3324 roce_set_field(context->byte_220_retry_psn_msn,
3325 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3326 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
3327 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3328 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3329 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
3331 roce_set_field(context->byte_224_retry_msg,
3332 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3333 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
3334 roce_set_field(qpc_mask->byte_224_retry_msg,
3335 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3336 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
3338 roce_set_field(context->byte_224_retry_msg,
3339 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3340 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
3341 roce_set_field(qpc_mask->byte_224_retry_msg,
3342 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3343 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
3345 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3346 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3347 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3349 roce_set_bit(qpc_mask->byte_248_ack_psn,
3350 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3352 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3353 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3355 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3356 V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
3357 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3358 V2_QPC_BYTE_212_RETRY_CNT_S, 0);
3360 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3361 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
3362 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3363 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
3365 roce_set_field(context->byte_244_rnr_rxack,
3366 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3367 V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
3368 roce_set_field(qpc_mask->byte_244_rnr_rxack,
3369 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3370 V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
3372 roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3373 V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
3374 roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3375 V2_QPC_BYTE_244_RNR_CNT_S, 0);
3377 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3378 V2_QPC_BYTE_212_LSN_S, 0x100);
3379 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3380 V2_QPC_BYTE_212_LSN_S, 0);
3382 if (attr_mask & IB_QP_TIMEOUT) {
3383 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
3384 V2_QPC_BYTE_28_AT_S, attr->timeout);
3385 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
3386 V2_QPC_BYTE_28_AT_S, 0);
3389 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3390 V2_QPC_BYTE_28_SL_S,
3391 rdma_ah_get_sl(&attr->ah_attr));
3392 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3393 V2_QPC_BYTE_28_SL_S, 0);
3394 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3396 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3397 V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
3398 roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3399 V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
3401 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3402 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3403 roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3404 V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
3405 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3406 V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
3408 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
3409 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
3410 V2_QPC_BYTE_208_SR_MAX_S,
3411 fls(attr->max_rd_atomic - 1));
3412 roce_set_field(qpc_mask->byte_208_irrl,
3413 V2_QPC_BYTE_208_SR_MAX_M,
3414 V2_QPC_BYTE_208_SR_MAX_S, 0);
3419 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
3420 const struct ib_qp_attr *attr,
3421 int attr_mask, enum ib_qp_state cur_state,
3422 enum ib_qp_state new_state)
3424 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3425 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3426 struct hns_roce_v2_qp_context *context;
3427 struct hns_roce_v2_qp_context *qpc_mask;
3428 struct device *dev = hr_dev->dev;
3431 context = kcalloc(2, sizeof(*context), GFP_KERNEL);
3435 qpc_mask = context + 1;
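/*
 * Two contexts are allocated back to back: the first holds the new QPC
 * values and the second holds the mask that the MODIFY_QPC mailbox
 * command consumes together with it.
 */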
3437 * In the v2 engine, software passes a context and a context mask to
3438 * hardware when modifying a QP. If software needs to modify some fields
3439 * of the context, the corresponding bits in the context mask must be
3440 * cleared to 0 at the same time; all other mask bits stay 0x1.
3442 memset(qpc_mask, 0xff, sizeof(*qpc_mask));
3443 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3444 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
3446 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3447 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
3449 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3450 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
3454 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
3455 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
3459 } else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
3460 (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
3461 (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
3462 (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
3463 (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
3464 (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3465 (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3466 (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3467 (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3468 (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3469 (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3470 (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3471 (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
3472 (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
3473 (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
3477 dev_err(dev, "Illegal state for QP!\n");
3481 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
3482 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
3484 /* Every state transition must update the QP state field */
3485 roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
3486 V2_QPC_BYTE_60_QP_ST_S, new_state);
3487 roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
3488 V2_QPC_BYTE_60_QP_ST_S, 0);
3490 /* SW passes the context to HW */
3491 ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
3494 dev_err(dev, "hns_roce_qp_modify failed (%d)\n", ret);
3498 hr_qp->state = new_state;
3500 if (attr_mask & IB_QP_ACCESS_FLAGS)
3501 hr_qp->atomic_rd_en = attr->qp_access_flags;
3503 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3504 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3505 if (attr_mask & IB_QP_PORT) {
3506 hr_qp->port = attr->port_num - 1;
3507 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3510 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3511 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3512 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3513 if (ibqp->send_cq != ibqp->recv_cq)
3514 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
3521 hr_qp->sq_next_wqe = 0;
3522 hr_qp->next_sge = 0;
3523 if (hr_qp->rq.wqe_cnt)
3524 *hr_qp->rdb.db_record = 0;
3532 static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
3535 case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
3536 case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
3537 case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
3538 case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
3539 case HNS_ROCE_QP_ST_SQ_DRAINING:
3540 case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
3541 case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
3542 case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
3547 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
3548 struct hns_roce_qp *hr_qp,
3549 struct hns_roce_v2_qp_context *hr_context)
3551 struct hns_roce_cmd_mailbox *mailbox;
3554 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3555 if (IS_ERR(mailbox))
3556 return PTR_ERR(mailbox);
3558 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3559 HNS_ROCE_CMD_QUERY_QPC,
3560 HNS_ROCE_CMD_TIMEOUT_MSECS);
3562 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
3566 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3569 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3573 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3575 struct ib_qp_init_attr *qp_init_attr)
3577 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3578 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3579 struct hns_roce_v2_qp_context *context;
3580 struct device *dev = hr_dev->dev;
3585 context = kzalloc(sizeof(*context), GFP_KERNEL);
3589 memset(qp_attr, 0, sizeof(*qp_attr));
3590 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3592 mutex_lock(&hr_qp->mutex);
3594 if (hr_qp->state == IB_QPS_RESET) {
3595 qp_attr->qp_state = IB_QPS_RESET;
3600 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
3602 dev_err(dev, "query qpc error\n");
3607 state = roce_get_field(context->byte_60_qpst_mapid,
3608 V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
3609 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
3610 if (tmp_qp_state == -1) {
3611 dev_err(dev, "Illegal ib_qp_state\n");
3615 hr_qp->state = (u8)tmp_qp_state;
3616 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3617 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
3618 V2_QPC_BYTE_24_MTU_M,
3619 V2_QPC_BYTE_24_MTU_S);
3620 qp_attr->path_mig_state = IB_MIG_ARMED;
3621 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3622 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3623 qp_attr->qkey = V2_QKEY_VAL;
3625 qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
3626 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3627 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
3628 qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
3629 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3630 V2_QPC_BYTE_172_SQ_CUR_PSN_S);
3631 qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err,
3632 V2_QPC_BYTE_56_DQPN_M,
3633 V2_QPC_BYTE_56_DQPN_S);
3634 qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
3635 V2_QPC_BYTE_76_RRE_S)) << 2) |
3636 ((roce_get_bit(context->byte_76_srqn_op_en,
3637 V2_QPC_BYTE_76_RWE_S)) << 1) |
3638 ((roce_get_bit(context->byte_76_srqn_op_en,
3639 V2_QPC_BYTE_76_ATE_S)) << 3);
3640 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3641 hr_qp->ibqp.qp_type == IB_QPT_UC) {
3642 struct ib_global_route *grh =
3643 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3645 rdma_ah_set_sl(&qp_attr->ah_attr,
3646 roce_get_field(context->byte_28_at_fl,
3647 V2_QPC_BYTE_28_SL_M,
3648 V2_QPC_BYTE_28_SL_S));
3649 grh->flow_label = roce_get_field(context->byte_28_at_fl,
3650 V2_QPC_BYTE_28_FL_M,
3651 V2_QPC_BYTE_28_FL_S);
3652 grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
3653 V2_QPC_BYTE_20_SGID_IDX_M,
3654 V2_QPC_BYTE_20_SGID_IDX_S);
3655 grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
3656 V2_QPC_BYTE_24_HOP_LIMIT_M,
3657 V2_QPC_BYTE_24_HOP_LIMIT_S);
3658 grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
3659 V2_QPC_BYTE_24_TC_M,
3660 V2_QPC_BYTE_24_TC_S);
3662 memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
3665 qp_attr->port_num = hr_qp->port + 1;
3666 qp_attr->sq_draining = 0;
3667 qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
3668 V2_QPC_BYTE_208_SR_MAX_M,
3669 V2_QPC_BYTE_208_SR_MAX_S);
3670 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
3671 V2_QPC_BYTE_140_RR_MAX_M,
3672 V2_QPC_BYTE_140_RR_MAX_S);
3673 qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
3674 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3675 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
3676 qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
3677 V2_QPC_BYTE_28_AT_M,
3678 V2_QPC_BYTE_28_AT_S);
3679 qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
3680 V2_QPC_BYTE_212_RETRY_CNT_M,
3681 V2_QPC_BYTE_212_RETRY_CNT_S);
3682 qp_attr->rnr_retry = context->rq_rnr_timer;
3685 qp_attr->cur_qp_state = qp_attr->qp_state;
3686 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3687 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3689 if (!ibqp->uobject) {
3690 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3691 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3693 qp_attr->cap.max_send_wr = 0;
3694 qp_attr->cap.max_send_sge = 0;
3697 qp_init_attr->cap = qp_attr->cap;
3700 mutex_unlock(&hr_qp->mutex);
3705 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
3706 struct hns_roce_qp *hr_qp,
3709 struct hns_roce_cq *send_cq, *recv_cq;
3710 struct device *dev = hr_dev->dev;
3713 if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
3714 /* Modify qp to reset before destroying qp */
3715 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
3716 hr_qp->state, IB_QPS_RESET);
3718 dev_err(dev, "modify QP %06lx to Reset failed.\n",
3724 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
3725 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
3727 hns_roce_lock_cqs(send_cq, recv_cq);
3730 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
3731 to_hr_srq(hr_qp->ibqp.srq) : NULL);
3732 if (send_cq != recv_cq)
3733 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
3736 hns_roce_qp_remove(hr_dev, hr_qp);
3738 hns_roce_unlock_cqs(send_cq, recv_cq);
3740 hns_roce_qp_free(hr_dev, hr_qp);
3742 /* If this is not a special QP, free its QPN */
3743 if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
3744 (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
3745 (hr_qp->ibqp.qp_type == IB_QPT_UD))
3746 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
3748 hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
3751 if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
3752 hns_roce_db_unmap_user(
3753 to_hr_ucontext(hr_qp->ibqp.uobject->context),
3755 ib_umem_release(hr_qp->umem);
3757 kfree(hr_qp->sq.wrid);
3758 kfree(hr_qp->rq.wrid);
3759 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
3760 if (hr_qp->rq.wqe_cnt)
3761 hns_roce_free_db(hr_dev, &hr_qp->rdb);
3764 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
3765 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
3766 kfree(hr_qp->rq_inl_buf.wqe_list);
3772 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
3774 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3775 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3778 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
3780 dev_err(hr_dev->dev, "Destroy qp failed (%d)\n", ret);
3784 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
3785 kfree(hr_to_hr_sqp(hr_qp));
3792 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
3794 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
3795 struct hns_roce_v2_cq_context *cq_context;
3796 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
3797 struct hns_roce_v2_cq_context *cqc_mask;
3798 struct hns_roce_cmd_mailbox *mailbox;
3801 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3802 if (IS_ERR(mailbox))
3803 return PTR_ERR(mailbox);
3805 cq_context = mailbox->buf;
3806 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
3808 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
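/* The mailbox carries two CQ contexts: the first holds the new values,
 * the second is a modification mask initialised to all ones; the fields
 * being changed are then marked in the mask so MODIFY_CQC only updates
 * those fields.
 */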
3810 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3811 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3813 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
3814 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3816 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3817 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
3819 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
3820 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
3823 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
3824 HNS_ROCE_CMD_MODIFY_CQC,
3825 HNS_ROCE_CMD_TIMEOUT_MSECS);
3826 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3828 dev_err(hr_dev->dev, "MODIFY CQ failed to send mailbox cmd.\n");
3833 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
3840 if (eq->type_flag == HNS_ROCE_AEQ) {
3841 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
3842 HNS_ROCE_V2_EQ_DB_CMD_S,
3843 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
3844 HNS_ROCE_EQ_DB_CMD_AEQ :
3845 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
3847 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
3848 HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
3850 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
3851 HNS_ROCE_V2_EQ_DB_CMD_S,
3852 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
3853 HNS_ROCE_EQ_DB_CMD_CEQ :
3854 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
3857 roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
3858 HNS_ROCE_V2_EQ_DB_PARA_S,
3859 (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
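/* doorbell[0] carries the arm command and the EQN tag, doorbell[1] the
 * current consumer index; the 64-bit doorbell write acknowledges the
 * processed EQEs and re-arms the EQ according to arm_st.
 */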
3861 hns_roce_write64_k(doorbell, eq->doorbell);
3864 static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3865 struct hns_roce_aeqe *aeqe,
3868 struct device *dev = hr_dev->dev;
3871 dev_warn(dev, "Local work queue catastrophic error.\n");
3872 sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
3873 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
3875 case HNS_ROCE_LWQCE_QPC_ERROR:
3876 dev_warn(dev, "QP %d, QPC error.\n", qpn);
3878 case HNS_ROCE_LWQCE_MTU_ERROR:
3879 dev_warn(dev, "QP %d, MTU error.\n", qpn);
3881 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3882 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3884 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3885 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3887 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
3888 dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
3891 dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
3896 static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
3897 struct hns_roce_aeqe *aeqe, u32 qpn)
3899 struct device *dev = hr_dev->dev;
3902 dev_warn(dev, "Local access violation work queue error.\n");
3903 sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
3904 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
3906 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
3907 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
3909 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
3910 dev_warn(dev, "QP %d, length error.\n", qpn);
3912 case HNS_ROCE_LAVWQE_VA_ERROR:
3913 dev_warn(dev, "QP %d, VA error.\n", qpn);
3915 case HNS_ROCE_LAVWQE_PD_ERROR:
3916 dev_err(dev, "QP %d, PD error.\n", qpn);
3918 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
3919 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
3921 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
3922 dev_warn(dev, "QP %d, key state error.\n", qpn);
3924 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
3925 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
3928 dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
3933 static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
3934 struct hns_roce_aeqe *aeqe,
3937 struct device *dev = hr_dev->dev;
3940 qpn = roce_get_field(aeqe->event.qp_event.qp,
3941 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
3942 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
3944 switch (event_type) {
3945 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3946 dev_warn(dev, "Communication established.\n");
3948 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3949 dev_warn(dev, "Send queue drained.\n");
3951 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3952 hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
3954 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3955 dev_warn(dev, "Invalid request local work queue error.\n");
3957 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3958 hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
3964 hns_roce_qp_event(hr_dev, qpn, event_type);
3967 static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
3968 struct hns_roce_aeqe *aeqe,
3971 struct device *dev = hr_dev->dev;
3974 cqn = roce_get_field(aeqe->event.cq_event.cq,
3975 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
3976 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
3978 switch (event_type) {
3979 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3980 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
3982 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3983 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
3989 hns_roce_cq_event(hr_dev, cqn, event_type);
3992 static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
3997 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
3998 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4000 return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
4004 static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
4009 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4011 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4013 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4014 return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
4017 return (struct hns_roce_aeqe *)((u8 *)
4018 (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
4021 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
4023 struct hns_roce_aeqe *aeqe;
4026 aeqe = get_aeqe_v2(eq, eq->cons_index);
4028 aeqe = mhop_get_aeqe(eq, eq->cons_index);
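/* An AEQE is valid when its owner bit differs from the software pass bit
 * derived from cons_index (cons_index counts over two passes of the ring,
 * so "cons_index & entries" flips each time the queue wraps).
 */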
4030 return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
4031 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
4034 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
4035 struct hns_roce_eq *eq)
4037 struct device *dev = hr_dev->dev;
4038 struct hns_roce_aeqe *aeqe;
4042 while ((aeqe = next_aeqe_sw_v2(eq))) {
4044 /* Make sure we read AEQ entry after we have checked the
4049 event_type = roce_get_field(aeqe->asyn,
4050 HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
4051 HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
4053 switch (event_type) {
4054 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4055 dev_warn(dev, "Path migration succeeded.\n");
4057 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4058 dev_warn(dev, "Path migration failed.\n");
4060 case HNS_ROCE_EVENT_TYPE_COMM_EST:
4061 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4062 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4063 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4064 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4065 hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type);
4067 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4068 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4069 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4070 dev_warn(dev, "SRQ is not supported.\n");
4072 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4073 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4074 hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type);
4076 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4077 dev_warn(dev, "DB overflow.\n");
4079 case HNS_ROCE_EVENT_TYPE_MB:
4080 hns_roce_cmd_event(hr_dev,
4081 le16_to_cpu(aeqe->event.cmd.token),
4082 aeqe->event.cmd.status,
4083 le64_to_cpu(aeqe->event.cmd.out_param));
4085 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
4086 dev_warn(dev, "CEQ overflow.\n");
4088 case HNS_ROCE_EVENT_TYPE_FLR:
4089 dev_warn(dev, "Function level reset.\n");
4092 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
4093 event_type, eq->eqn, eq->cons_index);
4100 if (eq->cons_index > (2 * eq->entries - 1)) {
4101 dev_warn(dev, "cons_index overflow, set back to 0.\n");
4106 set_eq_cons_index_v2(eq);
4110 static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
4115 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4116 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4118 return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
4122 static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
4127 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4129 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4131 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4132 return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
4135 return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
4136 buf_chk_sz]) + off % buf_chk_sz);
4139 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
4141 struct hns_roce_ceqe *ceqe;
4144 ceqe = get_ceqe_v2(eq, eq->cons_index);
4146 ceqe = mhop_get_ceqe(eq, eq->cons_index);
4148 return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
4149 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
4152 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
4153 struct hns_roce_eq *eq)
4155 struct device *dev = hr_dev->dev;
4156 struct hns_roce_ceqe *ceqe;
4160 while ((ceqe = next_ceqe_sw_v2(eq))) {
4162 /* Make sure we read CEQ entry after we have checked the
4167 cqn = roce_get_field(ceqe->comp,
4168 HNS_ROCE_V2_CEQE_COMP_CQN_M,
4169 HNS_ROCE_V2_CEQE_COMP_CQN_S);
4171 hns_roce_cq_completion(hr_dev, cqn);
4176 if (eq->cons_index > (2 * eq->entries - 1)) {
4177 dev_warn(dev, "cons_index overflow, set back to 0.\n");
4182 set_eq_cons_index_v2(eq);
4187 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
4189 struct hns_roce_eq *eq = eq_ptr;
4190 struct hns_roce_dev *hr_dev = eq->hr_dev;
4193 if (eq->type_flag == HNS_ROCE_CEQ)
4194 /* Completion event interrupt */
4195 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
4197 /* Asynchronous event interrupt */
4198 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
4200 return IRQ_RETVAL(int_work);
4203 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
4205 struct hns_roce_dev *hr_dev = dev_id;
4206 struct device *dev = hr_dev->dev;
4211 /* Abnormal interrupt */
4212 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
4213 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
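/* For each recognised abnormal condition: log it, write the status bit
 * back to the status register to acknowledge it, and re-write the
 * abnormal interrupt enable bit.
 */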
4215 if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
4216 dev_err(dev, "AEQ overflow!\n");
4218 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
4219 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4221 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4222 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4225 } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
4226 dev_err(dev, "BUS ERR!\n");
4228 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
4229 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4231 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4232 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4235 } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
4236 dev_err(dev, "OTHER ERR!\n");
4238 roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
4239 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4241 roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4242 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4246 dev_err(dev, "No abnormal interrupt found!\n");
4248 return IRQ_RETVAL(int_work);
4251 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
4252 int eq_num, int enable_flag)
4256 if (enable_flag == EQ_ENABLE) {
4257 for (i = 0; i < eq_num; i++)
4258 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
4260 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
4262 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
4263 HNS_ROCE_V2_VF_ABN_INT_EN_M);
4264 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
4265 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
4267 for (i = 0; i < eq_num; i++)
4268 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
4270 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
4272 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
4273 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
4274 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
4275 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
4279 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
4281 struct device *dev = hr_dev->dev;
4284 if (eqn < hr_dev->caps.num_comp_vectors)
4285 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
4286 0, HNS_ROCE_CMD_DESTROY_CEQC,
4287 HNS_ROCE_CMD_TIMEOUT_MSECS);
4289 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
4290 0, HNS_ROCE_CMD_DESTROY_AEQC,
4291 HNS_ROCE_CMD_TIMEOUT_MSECS);
4293 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
4296 static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
4297 struct hns_roce_eq *eq)
4299 struct device *dev = hr_dev->dev;
4309 mhop_num = hr_dev->caps.eqe_hop_num;
4310 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4311 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
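/* The EQE buffer layout depends on eqe_hop_num: hop 0 keeps every entry
 * in one contiguous buffer (bt_l0), hop 1 uses an L0 base-address table
 * whose slots point at buffer pages, and hop 2 inserts L1 tables between
 * L0 and the buffer pages. Buffers are freed in the reverse order of
 * allocation, with the last chunk sized by the entries that remain.
 */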
4314 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
4315 dma_free_coherent(dev, (unsigned int)(eq->entries *
4316 eq->eqe_size), eq->bt_l0, eq->l0_dma);
4320 /* hop_num = 1 or hop_num = 2 */
4321 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4322 if (mhop_num == 1) {
4323 for (i = 0; i < eq->l0_last_num; i++) {
4324 if (i == eq->l0_last_num - 1) {
4325 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
4326 size = (eq->entries - eqe_alloc) * eq->eqe_size;
4327 dma_free_coherent(dev, size, eq->buf[i],
4331 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4334 } else if (mhop_num == 2) {
4335 for (i = 0; i < eq->l0_last_num; i++) {
4336 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4339 for (j = 0; j < bt_chk_sz / 8; j++) {
4340 idx = i * (bt_chk_sz / 8) + j;
4341 if ((i == eq->l0_last_num - 1)
4342 && j == eq->l1_last_num - 1) {
4343 eqe_alloc = (buf_chk_sz / eq->eqe_size)
4345 size = (eq->entries - eqe_alloc)
4347 dma_free_coherent(dev, size,
4352 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
4367 static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
4368 struct hns_roce_eq *eq)
4372 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4374 if (hr_dev->caps.eqe_hop_num) {
4375 hns_roce_mhop_free_eq(hr_dev, eq);
4380 dma_free_coherent(hr_dev->dev, buf_chk_sz,
4381 eq->buf_list->buf, eq->buf_list->map);
4384 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
4385 struct hns_roce_eq *eq,
4388 struct hns_roce_eq_context *eqc;
4391 memset(eqc, 0, sizeof(struct hns_roce_eq_context));
4394 eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
4395 eq->hop_num = hr_dev->caps.eqe_hop_num;
4397 eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
4398 eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
4399 eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
4400 eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
4401 eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
4402 eq->shift = ilog2((unsigned int)eq->entries);
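/* eqe_ba is the base address programmed into the context: the DMA
 * address of the single linear buffer when eqe_hop_num is 0, otherwise
 * the DMA address of the L0 base-address table.
 */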
4405 eq->eqe_ba = eq->buf_list->map;
4407 eq->eqe_ba = eq->l0_dma;
4410 roce_set_field(eqc->byte_4,
4411 HNS_ROCE_EQC_EQ_ST_M,
4412 HNS_ROCE_EQC_EQ_ST_S,
4413 HNS_ROCE_V2_EQ_STATE_VALID);
4415 /* set eqe hop num */
4416 roce_set_field(eqc->byte_4,
4417 HNS_ROCE_EQC_HOP_NUM_M,
4418 HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
4420 /* set eqc over_ignore */
4421 roce_set_field(eqc->byte_4,
4422 HNS_ROCE_EQC_OVER_IGNORE_M,
4423 HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
4425 /* set eqc coalesce */
4426 roce_set_field(eqc->byte_4,
4427 HNS_ROCE_EQC_COALESCE_M,
4428 HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
4430 /* set eqc arm_state */
4431 roce_set_field(eqc->byte_4,
4432 HNS_ROCE_EQC_ARM_ST_M,
4433 HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
4436 roce_set_field(eqc->byte_4,
4438 HNS_ROCE_EQC_EQN_S, eq->eqn);
4441 roce_set_field(eqc->byte_4,
4442 HNS_ROCE_EQC_EQE_CNT_M,
4443 HNS_ROCE_EQC_EQE_CNT_S,
4444 HNS_ROCE_EQ_INIT_EQE_CNT);
4446 /* set eqe_ba_pg_sz */
4447 roce_set_field(eqc->byte_8,
4448 HNS_ROCE_EQC_BA_PG_SZ_M,
4449 HNS_ROCE_EQC_BA_PG_SZ_S,
4450 eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
4452 /* set eqe_buf_pg_sz */
4453 roce_set_field(eqc->byte_8,
4454 HNS_ROCE_EQC_BUF_PG_SZ_M,
4455 HNS_ROCE_EQC_BUF_PG_SZ_S,
4456 eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
4458 /* set eq_producer_idx */
4459 roce_set_field(eqc->byte_8,
4460 HNS_ROCE_EQC_PROD_INDX_M,
4461 HNS_ROCE_EQC_PROD_INDX_S,
4462 HNS_ROCE_EQ_INIT_PROD_IDX);
4464 /* set eq_max_cnt */
4465 roce_set_field(eqc->byte_12,
4466 HNS_ROCE_EQC_MAX_CNT_M,
4467 HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
4470 roce_set_field(eqc->byte_12,
4471 HNS_ROCE_EQC_PERIOD_M,
4472 HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
4474 /* set eqe_report_timer */
4475 roce_set_field(eqc->eqe_report_timer,
4476 HNS_ROCE_EQC_REPORT_TIMER_M,
4477 HNS_ROCE_EQC_REPORT_TIMER_S,
4478 HNS_ROCE_EQ_INIT_REPORT_TIMER);
4480 /* set eqe_ba [34:3] */
4481 roce_set_field(eqc->eqe_ba0,
4482 HNS_ROCE_EQC_EQE_BA_L_M,
4483 HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
4485 /* set eqe_ba [63:35] */
4486 roce_set_field(eqc->eqe_ba1,
4487 HNS_ROCE_EQC_EQE_BA_H_M,
4488 HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
4491 roce_set_field(eqc->byte_28,
4492 HNS_ROCE_EQC_SHIFT_M,
4493 HNS_ROCE_EQC_SHIFT_S, eq->shift);
4495 /* set eq MSI_IDX */
4496 roce_set_field(eqc->byte_28,
4497 HNS_ROCE_EQC_MSI_INDX_M,
4498 HNS_ROCE_EQC_MSI_INDX_S,
4499 HNS_ROCE_EQ_INIT_MSI_IDX);
4501 /* set cur_eqe_ba [27:12] */
4502 roce_set_field(eqc->byte_28,
4503 HNS_ROCE_EQC_CUR_EQE_BA_L_M,
4504 HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
4506 /* set cur_eqe_ba [59:28] */
4507 roce_set_field(eqc->byte_32,
4508 HNS_ROCE_EQC_CUR_EQE_BA_M_M,
4509 HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
4511 /* set cur_eqe_ba [63:60] */
4512 roce_set_field(eqc->byte_36,
4513 HNS_ROCE_EQC_CUR_EQE_BA_H_M,
4514 HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
4516 /* set eq consumer idx */
4517 roce_set_field(eqc->byte_36,
4518 HNS_ROCE_EQC_CONS_INDX_M,
4519 HNS_ROCE_EQC_CONS_INDX_S,
4520 HNS_ROCE_EQ_INIT_CONS_IDX);
4522 /* set nxt_eqe_ba[43:12] */
4523 roce_set_field(eqc->nxt_eqe_ba0,
4524 HNS_ROCE_EQC_NXT_EQE_BA_L_M,
4525 HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
4527 /* set nxt_eqe_ba[63:44] */
4528 roce_set_field(eqc->nxt_eqe_ba1,
4529 HNS_ROCE_EQC_NXT_EQE_BA_H_M,
4530 HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
4533 static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
4534 struct hns_roce_eq *eq)
4536 struct device *dev = hr_dev->dev;
4537 int eq_alloc_done = 0;
4552 mhop_num = hr_dev->caps.eqe_hop_num;
4553 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4554 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
4556 ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
4558 bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
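/* ba_num is the number of buffer pages needed to hold all EQEs; bt_num
 * is how many base-address tables are needed to reference them, with
 * each table slot holding one 8-byte DMA address.
 */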
4561 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
4562 if (eq->entries > buf_chk_sz / eq->eqe_size) {
4563 dev_err(dev, "eq entries %d exceed buf_pg_sz!",
4567 eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
4568 &(eq->l0_dma), GFP_KERNEL);
4572 eq->cur_eqe_ba = eq->l0_dma;
4575 memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
4580 eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
4583 eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
4585 goto err_kcalloc_buf;
4587 if (mhop_num == 2) {
4588 eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
4590 goto err_kcalloc_l1_dma;
4592 eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
4594 goto err_kcalloc_bt_l1;
4598 eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
4600 goto err_dma_alloc_l0;
4602 if (mhop_num == 1) {
4603 if (ba_num > (bt_chk_sz / 8))
4604 dev_err(dev, "ba_num %d is too large for 1 hop\n",
4608 for (i = 0; i < bt_chk_sz / 8; i++) {
4609 if (eq_buf_cnt + 1 < ba_num) {
4612 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
4613 size = (eq->entries - eqe_alloc) * eq->eqe_size;
4615 eq->buf[i] = dma_alloc_coherent(dev, size,
4619 goto err_dma_alloc_buf;
4621 memset(eq->buf[i], 0, size);
4622 *(eq->bt_l0 + i) = eq->buf_dma[i];
4625 if (eq_buf_cnt >= ba_num)
4628 eq->cur_eqe_ba = eq->buf_dma[0];
4629 eq->nxt_eqe_ba = eq->buf_dma[1];
4631 } else if (mhop_num == 2) {
4632 /* alloc L1 BT and buf */
4633 for (i = 0; i < bt_chk_sz / 8; i++) {
4634 eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
4638 goto err_dma_alloc_l1;
4639 *(eq->bt_l0 + i) = eq->l1_dma[i];
4641 for (j = 0; j < bt_chk_sz / 8; j++) {
4642 idx = i * bt_chk_sz / 8 + j;
4643 if (eq_buf_cnt + 1 < ba_num) {
4646 eqe_alloc = (buf_chk_sz / eq->eqe_size)
4648 size = (eq->entries - eqe_alloc)
4651 eq->buf[idx] = dma_alloc_coherent(dev, size,
4652 &(eq->buf_dma[idx]),
4655 goto err_dma_alloc_buf;
4657 memset(eq->buf[idx], 0, size);
4658 *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
4661 if (eq_buf_cnt >= ba_num) {
4670 eq->cur_eqe_ba = eq->buf_dma[0];
4671 eq->nxt_eqe_ba = eq->buf_dma[1];
4674 eq->l0_last_num = i + 1;
4676 eq->l1_last_num = j + 1;
4681 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4684 for (i -= 1; i >= 0; i--) {
4685 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4688 for (j = 0; j < bt_chk_sz / 8; j++) {
4689 idx = i * bt_chk_sz / 8 + j;
4690 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
4694 goto err_dma_alloc_l0;
4697 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4702 for (i -= 1; i >= 0; i--)
4703 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4705 else if (mhop_num == 2) {
4708 for (; i >= 0; i--) {
4709 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4712 for (j = 0; j < bt_chk_sz / 8; j++) {
4713 if (i == record_i && j >= record_j)
4716 idx = i * bt_chk_sz / 8 + j;
4717 dma_free_coherent(dev, buf_chk_sz,
4743 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
4744 struct hns_roce_eq *eq,
4745 unsigned int eq_cmd)
4747 struct device *dev = hr_dev->dev;
4748 struct hns_roce_cmd_mailbox *mailbox;
4752 /* Allocate mailbox memory */
4753 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4754 if (IS_ERR(mailbox))
4755 return PTR_ERR(mailbox);
4757 if (!hr_dev->caps.eqe_hop_num) {
4758 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4760 eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
4762 if (!eq->buf_list) {
4767 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
4768 &(eq->buf_list->map),
4770 if (!eq->buf_list->buf) {
4775 memset(eq->buf_list->buf, 0, buf_chk_sz);
4777 ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
4784 hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
4786 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
4787 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
4789 dev_err(dev, "[mailbox cmd] create eqc failed.\n");
4793 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4798 if (!hr_dev->caps.eqe_hop_num)
4799 dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
4802 hns_roce_mhop_free_eq(hr_dev, eq);
4807 kfree(eq->buf_list);
4810 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4815 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
4817 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4818 struct device *dev = hr_dev->dev;
4819 struct hns_roce_eq *eq;
4820 unsigned int eq_cmd;
4829 other_num = hr_dev->caps.num_other_vectors;
4830 comp_num = hr_dev->caps.num_comp_vectors;
4831 aeq_num = hr_dev->caps.num_aeq_vectors;
4833 eq_num = comp_num + aeq_num;
4834 irq_num = eq_num + other_num;
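/* The MSI-X vectors are laid out as: the "other" (abnormal) vectors
 * first, then the AEQ vectors, then the CEQ vectors. The eq table itself
 * stores CEQs at indices [0, comp_num) followed by the AEQs, which is why
 * the irq index arithmetic below differs between the two types.
 */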
4836 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
4840 for (i = 0; i < irq_num; i++) {
4841 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
4843 if (!hr_dev->irq_names[i]) {
4845 goto err_failed_kzalloc;
4850 for (j = 0; j < eq_num; j++) {
4851 eq = &eq_table->eq[j];
4852 eq->hr_dev = hr_dev;
4856 eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
4857 eq->type_flag = HNS_ROCE_CEQ;
4858 eq->entries = hr_dev->caps.ceqe_depth;
4859 eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
4860 eq->irq = hr_dev->irq[j + other_num + aeq_num];
4861 eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
4862 eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
4865 eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
4866 eq->type_flag = HNS_ROCE_AEQ;
4867 eq->entries = hr_dev->caps.aeqe_depth;
4868 eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
4869 eq->irq = hr_dev->irq[j - comp_num + other_num];
4870 eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
4871 eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
4874 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
4876 dev_err(dev, "eq create failed.\n");
4877 goto err_create_eq_fail;
4882 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
4884 /* irq contains: abnormal + AEQ + CEQ */
4885 for (k = 0; k < irq_num; k++)
4887 snprintf((char *)hr_dev->irq_names[k],
4888 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
4889 else if (k < (other_num + aeq_num))
4890 snprintf((char *)hr_dev->irq_names[k],
4891 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
4894 snprintf((char *)hr_dev->irq_names[k],
4895 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
4896 k - other_num - aeq_num);
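/* Request the abnormal vectors with the dedicated handler and every
 * event vector with the EQ handler. Since the eq array holds CEQs before
 * AEQs while the names above were generated in abnormal/AEQ/CEQ order,
 * the name index is shifted by aeq_num for CEQs and by comp_num for AEQs.
 */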
4898 for (k = 0; k < irq_num; k++) {
4900 ret = request_irq(hr_dev->irq[k],
4901 hns_roce_v2_msix_interrupt_abn,
4902 0, hr_dev->irq_names[k], hr_dev);
4904 else if (k < (other_num + comp_num))
4905 ret = request_irq(eq_table->eq[k - other_num].irq,
4906 hns_roce_v2_msix_interrupt_eq,
4907 0, hr_dev->irq_names[k + aeq_num],
4908 &eq_table->eq[k - other_num]);
4910 ret = request_irq(eq_table->eq[k - other_num].irq,
4911 hns_roce_v2_msix_interrupt_eq,
4912 0, hr_dev->irq_names[k - comp_num],
4913 &eq_table->eq[k - other_num]);
4915 dev_err(dev, "Failed to request irq!\n");
4916 goto err_request_irq_fail;
4922 err_request_irq_fail:
4923 for (k -= 1; k >= 0; k--)
4925 free_irq(hr_dev->irq[k], hr_dev);
4927 free_irq(eq_table->eq[k - other_num].irq,
4928 &eq_table->eq[k - other_num]);
4931 for (j -= 1; j >= 0; j--)
4932 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
4935 for (i -= 1; i >= 0; i--)
4936 kfree(hr_dev->irq_names[i]);
4937 kfree(eq_table->eq);
4942 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
4944 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4949 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4950 irq_num = eq_num + hr_dev->caps.num_other_vectors;
4953 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
4955 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
4956 free_irq(hr_dev->irq[i], hr_dev);
4958 for (i = 0; i < eq_num; i++) {
4959 hns_roce_v2_destroy_eqc(hr_dev, i);
4961 free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
4963 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
4966 for (i = 0; i < irq_num; i++)
4967 kfree(hr_dev->irq_names[i]);
4969 kfree(eq_table->eq);
4972 static const struct hns_roce_hw hns_roce_hw_v2 = {
4973 .cmq_init = hns_roce_v2_cmq_init,
4974 .cmq_exit = hns_roce_v2_cmq_exit,
4975 .hw_profile = hns_roce_v2_profile,
4976 .hw_init = hns_roce_v2_init,
4977 .hw_exit = hns_roce_v2_exit,
4978 .post_mbox = hns_roce_v2_post_mbox,
4979 .chk_mbox = hns_roce_v2_chk_mbox,
4980 .set_gid = hns_roce_v2_set_gid,
4981 .set_mac = hns_roce_v2_set_mac,
4982 .write_mtpt = hns_roce_v2_write_mtpt,
4983 .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
4984 .write_cqc = hns_roce_v2_write_cqc,
4985 .set_hem = hns_roce_v2_set_hem,
4986 .clear_hem = hns_roce_v2_clear_hem,
4987 .modify_qp = hns_roce_v2_modify_qp,
4988 .query_qp = hns_roce_v2_query_qp,
4989 .destroy_qp = hns_roce_v2_destroy_qp,
4990 .modify_cq = hns_roce_v2_modify_cq,
4991 .post_send = hns_roce_v2_post_send,
4992 .post_recv = hns_roce_v2_post_recv,
4993 .req_notify_cq = hns_roce_v2_req_notify_cq,
4994 .poll_cq = hns_roce_v2_poll_cq,
4995 .init_eq = hns_roce_v2_init_eq_table,
4996 .cleanup_eq = hns_roce_v2_cleanup_eq_table,
4999 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
5000 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
5001 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
5002 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
5003 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
5004 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
5005 /* required last entry */
5009 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
5011 static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
5012 struct hnae3_handle *handle)
5014 const struct pci_device_id *id;
5017 id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
5019 dev_err(hr_dev->dev, "device is not compatible!\n");
5023 hr_dev->hw = &hns_roce_hw_v2;
5024 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
5025 hr_dev->odb_offset = hr_dev->sdb_offset;
5027 /* Get info from NIC driver. */
5028 hr_dev->reg_base = handle->rinfo.roce_io_base;
5029 hr_dev->caps.num_ports = 1;
5030 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
5031 hr_dev->iboe.phy_port[0] = 0;
5033 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
5034 hr_dev->iboe.netdevs[0]->dev_addr);
5036 for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
5037 hr_dev->irq[i] = pci_irq_vector(handle->pdev,
5038 i + handle->rinfo.base_vector);
5040 /* cmd issue mode: 0 is poll, 1 is event */
5041 hr_dev->cmd_mod = 1;
5042 hr_dev->loop_idc = 0;
5047 static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
5049 struct hns_roce_dev *hr_dev;
5052 hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
5056 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
5057 if (!hr_dev->priv) {
5059 goto error_failed_kzalloc;
5062 hr_dev->pci_dev = handle->pdev;
5063 hr_dev->dev = &handle->pdev->dev;
5064 handle->priv = hr_dev;
5066 ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
5068 dev_err(hr_dev->dev, "Get Configuration failed!\n");
5069 goto error_failed_get_cfg;
5072 ret = hns_roce_init(hr_dev);
5074 dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
5075 goto error_failed_get_cfg;
5080 error_failed_get_cfg:
5081 kfree(hr_dev->priv);
5083 error_failed_kzalloc:
5084 ib_dealloc_device(&hr_dev->ib_dev);
5089 static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
5092 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
5097 hns_roce_exit(hr_dev);
5098 kfree(hr_dev->priv);
5099 ib_dealloc_device(&hr_dev->ib_dev);
5102 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
5104 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
5105 struct ib_event event;
5108 dev_err(&handle->pdev->dev,
5109 "Input parameter handle->priv is NULL!\n");
5113 hr_dev->active = false;
5114 hr_dev->is_reset = true;
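/* The device has been marked inactive and the reset flagged; now
 * broadcast IB_EVENT_DEVICE_FATAL so upper-layer users quiesce before
 * the hardware goes down.
 */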
5116 event.event = IB_EVENT_DEVICE_FATAL;
5117 event.device = &hr_dev->ib_dev;
5118 event.element.port_num = 1;
5119 ib_dispatch_event(&event);
5124 static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
5128 ret = hns_roce_hw_v2_init_instance(handle);
5130 /* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE engine is
5131 * reinitialized in this reset-notify callback. If the reinitialization
5132 * fails, we should inform the NIC driver.
5134 handle->priv = NULL;
5135 dev_err(&handle->pdev->dev,
5136 "In reset process RoCE reinit failed %d.\n", ret);
5142 static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
5145 hns_roce_hw_v2_uninit_instance(handle, false);
5149 static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
5150 enum hnae3_reset_notify_type type)
5155 case HNAE3_DOWN_CLIENT:
5156 ret = hns_roce_hw_v2_reset_notify_down(handle);
5158 case HNAE3_INIT_CLIENT:
5159 ret = hns_roce_hw_v2_reset_notify_init(handle);
5161 case HNAE3_UNINIT_CLIENT:
5162 ret = hns_roce_hw_v2_reset_notify_uninit(handle);
5171 static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
5172 .init_instance = hns_roce_hw_v2_init_instance,
5173 .uninit_instance = hns_roce_hw_v2_uninit_instance,
5174 .reset_notify = hns_roce_hw_v2_reset_notify,
5177 static struct hnae3_client hns_roce_hw_v2_client = {
5178 .name = "hns_roce_hw_v2",
5179 .type = HNAE3_CLIENT_ROCE,
5180 .ops = &hns_roce_hw_v2_ops,
5183 static int __init hns_roce_hw_v2_init(void)
5185 return hnae3_register_client(&hns_roce_hw_v2_client);
5188 static void __exit hns_roce_hw_v2_exit(void)
5190 hnae3_unregister_client(&hns_roce_hw_v2_client);
5193 module_init(hns_roce_hw_v2_init);
5194 module_exit(hns_roce_hw_v2_exit);
5196 MODULE_LICENSE("Dual BSD/GPL");
5197 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
5198 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
5199 MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
5200 MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");