// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
			       u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_VALID, valid));
	}
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
				     struct irdma_sge *sge, u8 valid)
{
	if (sge) {
		set_64bit_val(wqe, offset,
			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
		set_64bit_val(wqe, offset + 8,
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
	} else {
		set_64bit_val(wqe, offset, 0);
		set_64bit_val(wqe, offset + 8, 0);
	}
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
{
	u64 hdr;
	__le64 *wqe;
	u32 wqe_idx;
	bool signaled = false;

	if (!qp->sq_ring.head)
		return IRDMA_ERR_PARAM;

	wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	wqe = qp->sq_base[wqe_idx].elem;

	qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	/* make sure WQE is written before valid bit is set */
	dma_wmb();

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
	__le64 *wqe;
	u32 wqe_idx;

	if (!(qp_wqe_idx & 0x7F)) {
		wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
		wqe = qp->sq_base[wqe_idx].elem;
		if (wqe_idx)
			memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
		else
			memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
	}
}

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
	u64 temp;
	u32 hw_sq_tail;
	u32 sw_sq_head;

	/* valid bit is written and loads completed before reading shadow */
	mb();

	/* read the doorbell shadow area */
	get_64bit_val(qp->shadow_area, 0, &temp);

	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (sw_sq_head != qp->initial_ring.head) {
		if (qp->push_dropped) {
			writel(qp->qp_id, qp->wqe_alloc_db);
			qp->push_dropped = false;
		} else if (sw_sq_head != hw_sq_tail) {
			if (sw_sq_head > qp->initial_ring.head) {
				if (hw_sq_tail >= qp->initial_ring.head &&
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			} else {
				if (hw_sq_tail >= qp->initial_ring.head ||
				    hw_sq_tail < sw_sq_head)
					writel(qp->qp_id, qp->wqe_alloc_db);
			}
		}
	}

	qp->initial_ring.head = qp->sq_ring.head;
}

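/*
 * Worked example of the doorbell decision above (illustrative note, not from
 * the driver): with a ring of size 64, suppose initial_ring.head = 60 and new
 * WQEs were posted past the wrap so sw_sq_head = 4. Then sw_sq_head is below
 * initial_ring.head and the doorbell must ring if HW's reported tail is
 * either still in the pre-wrap region (hw_sq_tail >= 60) or already wrapped
 * but behind the new head (hw_sq_tail < 4) -- exactly the "||" branch. The
 * unwrapped case uses "&&" because the newly posted interval is contiguous.
 */
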
/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
	set_32bit_val(qp->push_db, 0,
		      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
	qp->initial_ring.head = qp->sq_ring.head;
	qp->push_mode = true;
	qp->push_dropped = false;
}

void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
		       u32 wqe_idx, bool post_sq)
{
	__le64 *push;

	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
		    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
	    !qp->push_mode) {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	} else {
		push = (__le64 *)((uintptr_t)qp->push_wqe +
				  (wqe_idx & 0x7) * 0x20);
		memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
		irdma_qp_ring_push_db(qp, wqe_idx);
	}
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: post sq information
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info)
{
	__le64 *wqe;
	__le64 *wqe_0 = NULL;
	u32 nop_wqe_idx;
	u16 avail_quanta;
	u16 i;

	avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
		       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
		       qp->uk_attrs->max_hw_sq_chunk);
	if (quanta <= avail_quanta) {
		/* WR fits in current chunk */
		if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;
	} else {
		/* Need to pad with NOP */
		if (quanta + avail_quanta >
			IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
			return NULL;

		nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
		for (i = 0; i < avail_quanta; i++) {
			irdma_nop_1(qp);
			IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
		}
		if (qp->push_db && info->push_wqe)
			irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
					  avail_quanta, nop_wqe_idx, true);
	}

	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
	if (!*wqe_idx)
		qp->swqe_polarity = !qp->swqe_polarity;

	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);

	wqe = qp->sq_base[*wqe_idx].elem;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
	    (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
		wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
		wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
	}
	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;

	return wqe;
}

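/*
 * Illustrative note (not from the driver): with max_hw_sq_chunk = 8 and the
 * ring head at index 6, avail_quanta above is 8 - (6 % 8) = 2. A 4-quanta WR
 * therefore cannot start at index 6 without straddling a chunk boundary, so
 * two 1-quanta NOPs are posted first and the WR starts at index 8, the next
 * chunk-aligned slot.
 */
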
/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
	__le64 *wqe;
	enum irdma_status_code ret_code;

	if (IRDMA_RING_FULL_ERR(qp->rq_ring))
		return NULL;

	IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
	if (ret_code)
		return NULL;

	if (!*wqe_idx)
		qp->rwqe_polarity = !qp->rwqe_polarity;
	/* rq_wqe_size_multiplier is number of 32 byte quanta in one rq wqe */
	wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

	return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
					   struct irdma_post_sq_info *info,
					   bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	struct irdma_rdma_write *op_info;
	u32 i, wqe_idx;
	u32 total_size = 0, byte_off;
	enum irdma_status_code ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_write;
	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	read_fence |= info->read_fence;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_lo_sges + 1;
	else
		frag_cnt = op_info->num_lo_sges;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0,
					    op_info->lo_sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_lo_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

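/*
 * Usage sketch for the post path above (illustrative only, assuming an
 * already-initialized qp and a registered buffer; "buf_va", "buf_len",
 * "lkey", "rem_va" and "rstag" are hypothetical values, not driver symbols):
 */
#if 0
static enum irdma_status_code example_post_rdma_write(struct irdma_qp_uk *qp,
						      u64 buf_va, u32 buf_len,
						      u32 lkey, u64 rem_va,
						      u32 rstag)
{
	struct irdma_sge sge = {
		.tag_off = buf_va,
		.len = buf_len,
		.stag = lkey,
	};
	struct irdma_post_sq_info info = {
		.wr_id = 1,
		.op_type = IRDMA_OP_TYPE_RDMA_WRITE,
		.signaled = true,
	};

	info.op.rdma_write.lo_sg_list = &sge;
	info.op.rdma_write.num_lo_sges = 1;
	info.op.rdma_write.rem_addr.tag_off = rem_va;
	info.op.rdma_write.rem_addr.stag = rstag;

	/* post and ring the doorbell in one call */
	return irdma_uk_rdma_write(qp, &info, true);
}
#endif
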
/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
					  struct irdma_post_sq_info *info,
					  bool inv_stag, bool post_sq)
{
	struct irdma_rdma_read *op_info;
	enum irdma_status_code ret_code;
	u32 i, byte_off, total_size = 0;
	bool local_fence = false;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u32 wqe_idx;
	u16 quanta;
	u64 hdr;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.rdma_read;
	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_lo_sges; i++)
		total_size += op_info->lo_sg_list[i].len;

	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	addl_frag_cnt = op_info->num_lo_sges > 1 ?
			(op_info->num_lo_sges - 1) : 0;
	local_fence |= info->local_fence;

	qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
				    qp->swqe_polarity);
	for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off,
					    &op_info->lo_sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
	    !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE,
			 (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
				     struct irdma_post_sq_info *info,
				     bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_send *op_info;
	u64 hdr;
	u32 i, wqe_idx, total_size = 0, byte_off;
	enum irdma_status_code ret_code;
	u32 frag_cnt, addl_frag_cnt;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;

	op_info = &info->op.send;
	if (qp->max_sq_frag_cnt < op_info->num_sges)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	for (i = 0; i < op_info->num_sges; i++)
		total_size += op_info->sg_list[i].len;

	if (info->imm_data_valid)
		frag_cnt = op_info->num_sges + 1;
	else
		frag_cnt = op_info->num_sges;
	ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
	if (ret_code)
		return ret_code;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
	if (info->imm_data_valid) {
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
		i = 0;
	} else {
		qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
					    qp->swqe_polarity);
		i = 1;
	}

	for (byte_off = 32; i < op_info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
					    qp->swqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
	    frag_cnt) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->swqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
					struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @dest: pointer to wqe
 * @src: pointer to inline data
 * @len: length of inline data to copy
 * @polarity: compatibility parameter
 */
static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
					 u8 polarity)
{
	if (len <= 16) {
		memcpy(dest, src, len);
	} else {
		memcpy(dest, src, 16);
		src += 16;
		dest = dest + 32;
		memcpy(dest, src, len - 16);
	}
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta based on inline
 * data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
	return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
				  struct irdma_bind_window *op_info)
{
	set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
	set_64bit_val(wqe, 8,
		      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
		      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
	set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @dest: pointer to wqe
 * @src: pointer to inline data
 * @len: length of inline data to copy
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
{
	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
	u32 copy_size;

	dest += 8;
	if (len <= 8) {
		memcpy(dest, src, len);
		return;
	}

	*((u64 *)dest) = *((u64 *)src);
	len -= 8;
	src += 8;
	dest += 24; /* point to additional 32 byte quanta */

	while (len) {
		copy_size = len < 31 ? len : 31;
		memcpy(dest, src, copy_size);
		*(dest + 31) = inline_valid;
		len -= copy_size;
		dest += 32;
		src += copy_size;
	}
}

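/*
 * Layout sketch for the copy above (illustrative note): the first quantum
 * holds 8 data bytes starting at offset 8 (the header qword written later
 * lives elsewhere in the quantum). Each additional 32-byte quantum carries
 * up to 31 data bytes, with its last byte (offset 31) reserved for the
 * inline valid bit. A 40-byte payload thus splits as 8 + 31 + 1 bytes
 * across three quanta.
 */
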
/**
 * irdma_inline_data_size_to_quanta - calculate quanta based on inline
 * data size
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
	if (data_size <= 8)
		return IRDMA_QP_WQE_MIN_QUANTA;
	else if (data_size <= 39)
		return 2;
	else if (data_size <= 70)
		return 3;
	else if (data_size <= 101)
		return 4;
	else if (data_size <= 132)
		return 5;
	else if (data_size <= 163)
		return 6;
	else if (data_size <= 194)
		return 7;
	else
		return 8;
}

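/*
 * The thresholds above follow directly from the inline copy layout
 * (illustrative note, not from the driver): 8 bytes fit in the first
 * quantum and each further quantum adds 31 bytes, so the cutoffs are
 * 8, 8+31=39, 70, 101, 132, 163 and 194 for 1 through 7 quanta.
 */
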
/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code
irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			   bool post_sq)
{
	__le64 *wqe;
	struct irdma_inline_rdma_write *op_info;
	u64 hdr = 0;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inline_rdma_write;

	if (op_info->len > qp->max_inline_data)
		return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	read_fence |= info->read_fence;
	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
					qp->swqe_polarity);
	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
					    struct irdma_post_sq_info *info,
					    bool post_sq)
{
	__le64 *wqe;
	struct irdma_post_inline_send *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool read_fence = false;
	u16 quanta;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inline_send;

	if (op_info->len > qp->max_inline_data)
		return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;

	quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
					 info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 16,
		      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
		      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

	read_fence |= info->read_fence;
	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
	      FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
			 (info->imm_data_valid ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
	      FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	if (info->imm_data_valid)
		set_64bit_val(wqe, 0,
			      FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
	qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
					qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code
irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq)
{
	__le64 *wqe;
	struct irdma_inv_local_stag *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;
	struct irdma_sge sge = {};

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.inv_local_stag;
	local_fence = info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	sge.stag = op_info->target_stag;
	qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);

	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
				  post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

/**
 * irdma_uk_mw_bind - bind Memory Window
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
					struct irdma_post_sq_info *info,
					bool post_sq)
{
	__le64 *wqe;
	struct irdma_bind_window *op_info;
	u64 hdr;
	u32 wqe_idx;
	bool local_fence = false;

	info->push_wqe = qp->push_db ? true : false;
	op_info = &info->op.bind_window;
	local_fence |= info->local_fence;

	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
	      FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
			 ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
	      FIELD_PREP(IRDMAQPSQ_VABASEDTO,
			 (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
			 (op_info->mem_window_type_1 ? 1 : 0)) |
	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	if (info->push_wqe) {
		irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
				  post_sq);
	} else {
		if (post_sq)
			irdma_uk_qp_post_wr(qp);
	}

	return 0;
}

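/*
 * Note on the STAGRIGHTS encoding above (illustrative): ena_reads maps to
 * bit 2 and ena_writes to bit 3 of the stag-rights field, so a window
 * enabled for both reads and writes carries the value 0xC.
 */
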
/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
					     struct irdma_post_rq_info *info)
{
	u32 wqe_idx, i, byte_off;
	u32 addl_frag_cnt;
	__le64 *wqe;
	u64 hdr;

	if (qp->max_rq_frag_cnt < info->num_sges)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	qp->rq_wrid_array[wqe_idx] = info->wr_id;
	addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
	qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
				    qp->rwqe_polarity);

	for (i = 1, byte_off = 32; i < info->num_sges; i++) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
					    qp->rwqe_polarity);
		byte_off += 16;
	}

	/* if not an odd number set valid bit in next fragment */
	if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
	    info->num_sges) {
		qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
					    qp->rwqe_polarity);
		if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
			++addl_frag_cnt;
	}

	set_64bit_val(wqe, 16, 0);
	hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);

	return 0;
}

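/*
 * Usage sketch for the receive path above (illustrative only; "buf_va",
 * "buf_len" and "lkey" are hypothetical values, not driver symbols):
 */
#if 0
static enum irdma_status_code example_post_recv(struct irdma_qp_uk *qp,
						u64 buf_va, u32 buf_len,
						u32 lkey)
{
	struct irdma_sge sge = {
		.tag_off = buf_va,
		.len = buf_len,
		.stag = lkey,
	};
	struct irdma_post_rq_info info = {
		.wr_id = 2,
		.sg_list = &sge,
		.num_sges = 1,
	};

	return irdma_uk_post_receive(qp, &info);
}
#endif
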
/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
	cq->cq_base = cq_base;
	cq->cq_size = cq_size;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: hw cq
 * @cq_cnt: the count of the resized cq buffers
 */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_next;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);

	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	sw_cq_sel += cq_cnt;

	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se = 0;
	u8 arm_next = 0;
	u8 arm_seq_num;

	get_64bit_val(cq->shadow_area, 32, &temp_val);
	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
	arm_seq_num++;
	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
	arm_next_se |= 1;
	if (cq_notify == IRDMA_CQ_COMPL_EVENT)
		arm_next = 1;
	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

	set_64bit_val(cq->shadow_area, 32, temp_val);

	dma_wmb(); /* make sure shadow area is updated before arming */

	writel(cq->cq_id, cq->cqe_alloc_db);
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
enum irdma_status_code
irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
{
	u64 comp_ctx, qword0, qword2, qword3;
	__le64 *cqe;
	struct irdma_qp_uk *qp;
	struct irdma_ring *pring = NULL;
	u32 wqe_idx, q_type;
	enum irdma_status_code ret_code;
	bool move_cq_head = true;
	u8 polarity;
	bool ext_valid;
	__le64 *ext_cqe;

	if (cq->avoid_mem_cflct)
		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
	else
		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

	get_64bit_val(cqe, 24, &qword3);
	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
	if (polarity != cq->polarity)
		return IRDMA_ERR_Q_EMPTY;

	/* Ensure CQE contents are read after valid bit is checked */
	dma_rmb();

	ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
	if (ext_valid) {
		u64 qword6, qword7;
		u32 peek_head;

		if (cq->avoid_mem_cflct) {
			ext_cqe = (__le64 *)((u8 *)cqe + 32);
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
		} else {
			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
			ext_cqe = cq->cq_base[peek_head].buf;
			get_64bit_val(ext_cqe, 24, &qword7);
			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
			if (!peek_head)
				polarity ^= 1;
		}
		if (polarity != cq->polarity)
			return IRDMA_ERR_Q_EMPTY;

		/* Ensure ext CQE contents are read after ext valid bit is checked */
		dma_rmb();

		info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
		if (info->imm_valid) {
			u64 qword4;

			get_64bit_val(ext_cqe, 0, &qword4);
			info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
		}
		info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
		info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
		if (info->ud_smac_valid || info->ud_vlan_valid) {
			get_64bit_val(ext_cqe, 16, &qword6);
			if (info->ud_vlan_valid)
				info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
			if (info->ud_smac_valid) {
				info->ud_smac[5] = qword6 & 0xFF;
				info->ud_smac[4] = (qword6 >> 8) & 0xFF;
				info->ud_smac[3] = (qword6 >> 16) & 0xFF;
				info->ud_smac[2] = (qword6 >> 24) & 0xFF;
				info->ud_smac[1] = (qword6 >> 32) & 0xFF;
				info->ud_smac[0] = (qword6 >> 40) & 0xFF;
			}
		}
	} else {
		info->imm_valid = false;
		info->ud_smac_valid = false;
		info->ud_vlan_valid = false;
	}

	q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
	info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
	info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
	if (info->error) {
		info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
		info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
		if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
			info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
			/* Set the min error to standard flush error code for remaining cqes */
			if (info->minor_err != FLUSH_GENERAL_ERR) {
				qword3 &= ~IRDMA_CQ_MINERR;
				qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
				set_64bit_val(cqe, 24, qword3);
			}
		} else {
			info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
		}
	} else {
		info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
	}

	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);

	info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
	info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

	get_64bit_val(cqe, 8, &comp_ctx);

	info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
	qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
	if (!qp || qp->destroy_pending) {
		ret_code = IRDMA_ERR_Q_DESTROYED;
		goto exit;
	}
	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
	info->qp_handle = (irdma_qp_handle)(unsigned long)qp;

	if (q_type == IRDMA_CQE_QTYPE_RQ) {
		u32 array_idx;

		array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
		    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
				ret_code = IRDMA_ERR_Q_EMPTY;
				goto exit;
			}

			info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
			array_idx = qp->rq_ring.tail;
		} else {
			info->wr_id = qp->rq_wrid_array[array_idx];
		}

		info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

		if (info->imm_valid)
			info->op_type = IRDMA_OP_TYPE_REC_IMM;
		else
			info->op_type = IRDMA_OP_TYPE_REC;
		if (qword3 & IRDMACQ_STAG) {
			info->stag_invalid_set = true;
			info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
		} else {
			info->stag_invalid_set = false;
		}
		IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
		if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
			qp->rq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
				qp->rq_flush_complete = true;
			else
				move_cq_head = false;
		}
		pring = &qp->rq_ring;
	} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
		if (qp->first_sq_wq) {
			if (wqe_idx + 1 >= qp->conn_wqes)
				qp->first_sq_wq = false;

			if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
				IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
				IRDMA_RING_MOVE_TAIL(cq->cq_ring);
				set_64bit_val(cq->shadow_area, 0,
					      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
				memset(info, 0,
				       sizeof(struct irdma_cq_poll_info));
				return irdma_uk_cq_poll_cmpl(cq, info);
			}
		}
		/* cease posting push mode on push drop */
		if (info->push_dropped) {
			qp->push_mode = false;
			qp->push_dropped = true;
		}
		if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
			if (!info->comp_status)
				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
			IRDMA_RING_SET_TAIL(qp->sq_ring,
					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
		} else {
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
				ret_code = IRDMA_ERR_Q_EMPTY;
				goto exit;
			}

			do {
				__le64 *sw_wqe;
				u64 wqe_qword;
				u8 op_type;
				u32 tail;

				tail = qp->sq_ring.tail;
				sw_wqe = qp->sq_base[tail].elem;
				get_64bit_val(sw_wqe, 24,
					      &wqe_qword);
				op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
				info->op_type = op_type;
				IRDMA_RING_SET_TAIL(qp->sq_ring,
						    tail + qp->sq_wrtrk_array[tail].quanta);
				if (op_type != IRDMAQP_OP_NOP) {
					info->wr_id = qp->sq_wrtrk_array[tail].wrid;
					info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
					break;
				}
			} while (1);
			qp->sq_flush_seen = true;
			if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
				qp->sq_flush_complete = true;
		}
		pring = &qp->sq_ring;
	}

	ret_code = 0;

exit:
	if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
		if (pring && IRDMA_RING_MORE_WORK(*pring))
			move_cq_head = false;

	if (move_cq_head) {
		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
			cq->polarity ^= 1;

		if (ext_valid && !cq->avoid_mem_cflct) {
			IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
			if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
				cq->polarity ^= 1;
		}

		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		if (!cq->avoid_mem_cflct && ext_valid)
			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
		set_64bit_val(cq->shadow_area, 0,
			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
	} else {
		qword3 &= ~IRDMA_CQ_WQEIDX;
		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
		set_64bit_val(cqe, 24, qword3);
	}

	return ret_code;
}

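/*
 * Polling-loop usage sketch (illustrative only; "example_drain_cq" is a
 * hypothetical helper, not a driver symbol):
 */
#if 0
static void example_drain_cq(struct irdma_cq_uk *cq)
{
	struct irdma_cq_poll_info info;

	while (!irdma_uk_cq_poll_cmpl(cq, &info)) {
		/* consume info.wr_id / info.comp_status here */
	}
	/* IRDMA_ERR_Q_EMPTY ends the loop once no valid CQE remains */
}
#endif
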
/**
 * irdma_qp_round_up - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int irdma_qp_round_up(u32 wqdepth)
{
	int scount = 1;

	for (wqdepth--; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;

	return ++wqdepth;
}

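/*
 * Worked example (illustrative): for wqdepth = 300, the loop smears the top
 * set bit of 299 (0x12B) across all lower bits, yielding 0x1FF = 511, and
 * ++wqdepth returns 512, the next power of two. An exact power of two is
 * returned unchanged thanks to the initial decrement.
 */
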
/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on number of SGEs and
 * inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32
 * bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of 64
 * bytes). For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of 128
 * bytes). Otherwise, shift = 3 (wqe size of 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift)
{
	*shift = 0;
	if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
		if (sge > 1 || inline_data > 8) {
			if (sge < 4 && inline_data <= 39)
				*shift = 1;
			else if (sge < 8 && inline_data <= 101)
				*shift = 2;
			else
				*shift = 3;
		}
	} else if (sge > 1 || inline_data > 16) {
		*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
	}
}

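/*
 * Example (illustrative): on GEN_2 hardware, 5 SGEs with up to 64 bytes of
 * inline data gives shift = 2, i.e. 32 << 2 = 128-byte WQEs; 8 or more SGEs
 * (or inline data above 101 bytes) bumps this to shift = 3 (256-byte WQEs).
 */
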
/**
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
					 u32 sq_size, u8 shift, u32 *sqdepth)
{
	*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);

	if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
		return IRDMA_ERR_INVALID_SIZE;

	return 0;
}

/**
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
					 u32 rq_size, u8 shift, u32 *rqdepth)
{
	*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);

	if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
		*rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
		return IRDMA_ERR_INVALID_SIZE;

	return 0;
}

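/*
 * Sizing sketch (illustrative only; "example_size_sq" is a hypothetical
 * helper, not a driver symbol): a requested SQ size is first converted to a
 * WQE shift from the SGE/inline limits, then scaled, padded with
 * IRDMA_SQ_RSVD, rounded up to a power of two and clamped against the
 * device's max_hw_wq_quanta.
 */
#if 0
static enum irdma_status_code example_size_sq(struct irdma_uk_attrs *uk_attrs,
					      u32 req_size, u32 max_sge,
					      u32 max_inline, u32 *sqdepth)
{
	u8 shift;

	irdma_get_wqe_shift(uk_attrs, max_sge, max_inline, &shift);
	return irdma_get_sqdepth(uk_attrs, req_size, shift, sqdepth);
}
#endif
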
static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
	.iw_copy_inline_data = irdma_copy_inline_data,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
	.iw_set_fragment = irdma_set_fragment,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
};

static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
	.iw_copy_inline_data = irdma_copy_inline_data_gen_1,
	.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
	.iw_set_fragment = irdma_set_fragment_gen_1,
	.iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
};

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete
 * connection
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{
	u16 move_cnt = 1;

	if (!info->legacy_mode &&
	    (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
		move_cnt = 3;

	qp->conn_wqes = move_cnt;
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the vars used in both user and kernel mode.
 * The size of the wqe depends on the maximum number of fragments
 * allowed. Then size of wqe * the number of wqes should be the
 * amount of memory allocated for sq and rq.
 */
enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
					struct irdma_qp_uk_init_info *info)
{
	enum irdma_status_code ret_code = 0;
	u32 sq_ring_size;
	u8 sqshift, rqshift;

	qp->uk_attrs = info->uk_attrs;
	if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
	    info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
		return IRDMA_ERR_INVALID_FRAG_COUNT;

	irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
				    info->max_inline_data, &sqshift);
		if (info->abi_ver > 4)
			rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
	} else {
		irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
				    info->max_inline_data, &sqshift);
	}
	qp->qp_caps = info->qp_caps;
	qp->sq_base = info->sq;
	qp->rq_base = info->rq;
	qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
	qp->shadow_area = info->shadow_area;
	qp->sq_wrtrk_array = info->sq_wrtrk_array;

	qp->rq_wrid_array = info->rq_wrid_array;
	qp->wqe_alloc_db = info->wqe_alloc_db;
	qp->qp_id = info->qp_id;
	qp->sq_size = info->sq_size;
	qp->push_mode = false;
	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
	sq_ring_size = qp->sq_size << sqshift;
	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
	if (info->first_sq_wq) {
		irdma_setup_connection_wqes(qp, info);
		qp->swqe_polarity = 1;
		qp->first_sq_wq = true;
	} else {
		qp->swqe_polarity = 0;
	}
	qp->swqe_polarity_deferred = 1;
	qp->rwqe_polarity = 0;
	qp->rq_size = info->rq_size;
	qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
	qp->max_inline_data = info->max_inline_data;
	qp->rq_wqe_size = rqshift;
	IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
	qp->rq_wqe_size_multiplier = 1 << rqshift;
	if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
	else
		qp->wqe_ops = iw_wqe_uk_ops;

	return ret_code;
}

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
					struct irdma_cq_uk_init_info *info)
{
	cq->cq_base = info->cq_base;
	cq->cq_id = info->cq_id;
	cq->cq_size = info->cq_size;
	cq->cqe_alloc_db = info->cqe_alloc_db;
	cq->cq_ack_db = info->cq_ack_db;
	cq->shadow_area = info->shadow_area;
	cq->avoid_mem_cflct = info->avoid_mem_cflct;
	IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
	cq->polarity = 1;

	return 0;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
	__le64 *cqe;
	u64 qword3, comp_ctx;
	u32 cq_head;
	u8 polarity, temp;

	cq_head = cq->cq_ring.head;
	temp = cq->polarity;
	do {
		if (cq->avoid_mem_cflct)
			cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
		else
			cqe = cq->cq_base[cq_head].buf;
		get_64bit_val(cqe, 24, &qword3);
		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

		if (polarity != temp)
			break;

		get_64bit_val(cqe, 8, &comp_ctx);
		if ((void *)(unsigned long)comp_ctx == q)
			set_64bit_val(cqe, 8, 0);

		cq_head = (cq_head + 1) % cq->cq_ring.size;
		if (!cq_head)
			temp ^= 1;
	} while (true);
}

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
				 bool signaled, bool post_sq)
{
	u64 hdr;
	__le64 *wqe;
	u32 wqe_idx;
	struct irdma_post_sq_info info = {};

	info.push_wqe = false;
	info.wr_id = wr_id;
	wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
					 0, &info);
	if (!wqe)
		return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

	irdma_clr_wqes(qp, wqe_idx);

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);

	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
	      FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

	dma_wmb(); /* make sure WQE is populated before valid bit is set */

	set_64bit_val(wqe, 24, hdr);
	if (post_sq)
		irdma_uk_qp_post_wr(qp);

	return 0;
}

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*quanta = IRDMA_QP_WQE_MIN_QUANTA;
		break;
	case 2:
	case 3:
		*quanta = 2;
		break;
	case 4:
	case 5:
		*quanta = 3;
		break;
	case 6:
	case 7:
		*quanta = 4;
		break;
	case 8:
	case 9:
		*quanta = 5;
		break;
	case 10:
	case 11:
		*quanta = 6;
		break;
	case 12:
	case 13:
		*quanta = 7;
		break;
	case 14:
	case 15: /* when immediate data is present */
		*quanta = 8;
		break;
	default:
		return IRDMA_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}

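/*
 * Illustrative note: the mapping above reflects the WQE layout -- the first
 * 32-byte quantum holds the header plus one fragment, and each further
 * quantum holds two 16-byte fragments, so frag_cnt values 2n and 2n+1 both
 * need n+1 quanta.
 */
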
/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
	switch (frag_cnt) {
	case 0:
	case 1:
		*wqe_size = 32;
		break;
	case 2:
	case 3:
		*wqe_size = 64;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		*wqe_size = 128;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
		*wqe_size = 256;
		break;
	default:
		return IRDMA_ERR_INVALID_FRAG_COUNT;
	}

	return 0;
}