// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "defs.h"
#include "user.h"
#include "irdma.h"

/**
 * irdma_set_fragment - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
                               u8 valid)
{
        if (sge) {
                set_64bit_val(wqe, offset,
                              FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
                set_64bit_val(wqe, offset + 8,
                              FIELD_PREP(IRDMAQPSQ_VALID, valid) |
                              FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
                              FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
        } else {
                set_64bit_val(wqe, offset, 0);
                set_64bit_val(wqe, offset + 8,
                              FIELD_PREP(IRDMAQPSQ_VALID, valid));
        }
}

/**
 * irdma_set_fragment_gen_1 - set fragment in wqe
 * @wqe: wqe for setting fragment
 * @offset: offset value
 * @sge: sge length and stag
 * @valid: wqe valid flag
 */
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
                                     struct irdma_sge *sge, u8 valid)
{
        if (sge) {
                set_64bit_val(wqe, offset,
                              FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
                set_64bit_val(wqe, offset + 8,
                              FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
                              FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
        } else {
                set_64bit_val(wqe, offset, 0);
                set_64bit_val(wqe, offset + 8, 0);
        }
}

/**
 * irdma_nop_1 - insert a NOP wqe
 * @qp: hw qp ptr
 */
static enum irdma_status_code irdma_nop_1(struct irdma_qp_uk *qp)
{
        u64 hdr;
        __le64 *wqe;
        u32 wqe_idx;
        bool signaled = false;

        if (!qp->sq_ring.head)
                return IRDMA_ERR_PARAM;

        wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
        wqe = qp->sq_base[wqe_idx].elem;

        qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;

        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        /* make sure WQE is written before valid bit is set */
        dma_wmb();

        set_64bit_val(wqe, 24, hdr);

        return 0;
}

/**
 * irdma_clr_wqes - clear next 128 sq entries
 * @qp: hw qp ptr
 * @qp_wqe_idx: wqe_idx
 */
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
        __le64 *wqe;
        u32 wqe_idx;

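        /*
         * Each time the producer index crosses a 128-WQE boundary,
         * pre-clear the next 128 entries (128 * 32-byte quanta = 0x1000
         * bytes) so stale valid bits left over from the previous pass
         * around the ring are never mistaken for new WQEs. A block that
         * wraps to index 0 is cleared with the opposite pattern, since
         * the valid-bit polarity flips at the ring start.
         */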
        if (!(qp_wqe_idx & 0x7F)) {
                wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
                wqe = qp->sq_base[wqe_idx].elem;
                if (wqe_idx)
                        memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
                else
                        memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
        }
}

/**
 * irdma_uk_qp_post_wr - ring doorbell
 * @qp: hw qp ptr
 */
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
        u64 temp;
        u32 hw_sq_tail;
        u32 sw_sq_head;

        /* valid bit is written and loads completed before reading shadow */
        mb();

        /* read the doorbell shadow area */
        get_64bit_val(qp->shadow_area, 0, &temp);

        hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
        sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
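        /*
         * Ring the doorbell only when HW may need a kick: always after a
         * dropped push WQE, otherwise only if the HW tail sits inside the
         * posted-but-unacknowledged window [initial_ring.head, sw_sq_head).
         * The two comparisons below handle that window with and without
         * ring wrap-around.
         */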
        if (sw_sq_head != qp->initial_ring.head) {
                if (qp->push_dropped) {
                        writel(qp->qp_id, qp->wqe_alloc_db);
                        qp->push_dropped = false;
                } else if (sw_sq_head != hw_sq_tail) {
                        if (sw_sq_head > qp->initial_ring.head) {
                                if (hw_sq_tail >= qp->initial_ring.head &&
                                    hw_sq_tail < sw_sq_head)
                                        writel(qp->qp_id, qp->wqe_alloc_db);
                        } else {
                                if (hw_sq_tail >= qp->initial_ring.head ||
                                    hw_sq_tail < sw_sq_head)
                                        writel(qp->qp_id, qp->wqe_alloc_db);
                        }
                }
        }

        qp->initial_ring.head = qp->sq_ring.head;
}

/**
 * irdma_qp_ring_push_db - ring qp doorbell
 * @qp: hw qp ptr
 * @wqe_idx: wqe index
 */
static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
{
        set_32bit_val(qp->push_db, 0,
                      FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
        qp->initial_ring.head = qp->sq_ring.head;
        qp->push_mode = true;
        qp->push_dropped = false;
}

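/**
 * irdma_qp_push_wqe - copy wqe to the push page, or fall back to a doorbell
 * @qp: hw qp ptr
 * @wqe: wqe to push
 * @quanta: size of wqe in quanta
 * @wqe_idx: wqe index
 * @post_sq: flag to post sq
 */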
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
                       u32 wqe_idx, bool post_sq)
{
        __le64 *push;

        if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
                    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
            !qp->push_mode) {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        } else {
                push = (__le64 *)((uintptr_t)qp->push_wqe +
                                  (wqe_idx & 0x7) * 0x20);
                memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
                irdma_qp_ring_push_db(qp, wqe_idx);
        }
}

/**
 * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 * @quanta: size of WR in quanta
 * @total_size: size of WR in bytes
 * @info: info on WR
 */
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
                                   u16 quanta, u32 total_size,
                                   struct irdma_post_sq_info *info)
{
        __le64 *wqe;
        __le64 *wqe_0 = NULL;
        u32 nop_wqe_idx;
        u16 avail_quanta;
        u16 i;

        avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
                       (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
                       qp->uk_attrs->max_hw_sq_chunk);
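        /*
         * avail_quanta is the room left in the current hardware SQ chunk;
         * a WR must not straddle a chunk boundary. For example, with a
         * chunk size of 8 quanta and the ring head at index 13, only 3
         * quanta remain usable before NOP padding is required.
         */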
        if (quanta <= avail_quanta) {
                /* WR fits in current chunk */
                if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
                        return NULL;
        } else {
                /* Need to pad with NOP */
                if (quanta + avail_quanta >
                        IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
                        return NULL;

                nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
                for (i = 0; i < avail_quanta; i++) {
                        irdma_nop_1(qp);
                        IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
                }
                if (qp->push_db && info->push_wqe)
                        irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
                                          avail_quanta, nop_wqe_idx, true);
        }

        *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
        if (!*wqe_idx)
                qp->swqe_polarity = !qp->swqe_polarity;

        IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);

        wqe = qp->sq_base[*wqe_idx].elem;
        if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
            (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
                wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
                wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
        }
        qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
        qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
        qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;

        return wqe;
}

/**
 * irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
 * @qp: hw qp ptr
 * @wqe_idx: return wqe index
 */
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
{
        __le64 *wqe;
        enum irdma_status_code ret_code;

        if (IRDMA_RING_FULL_ERR(qp->rq_ring))
                return NULL;

        IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
        if (ret_code)
                return NULL;

        if (!*wqe_idx)
                qp->rwqe_polarity = !qp->rwqe_polarity;
        /* rq_wqe_size_multiplier is no of 32 byte quanta in one rq wqe */
        wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;

        return wqe;
}

/**
 * irdma_uk_rdma_write - rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
                                           struct irdma_post_sq_info *info,
                                           bool post_sq)
{
        u64 hdr;
        __le64 *wqe;
        struct irdma_rdma_write *op_info;
        u32 i, wqe_idx;
        u32 total_size = 0, byte_off;
        enum irdma_status_code ret_code;
        u32 frag_cnt, addl_frag_cnt;
        bool read_fence = false;
        u16 quanta;

        info->push_wqe = qp->push_db ? true : false;

        op_info = &info->op.rdma_write;
        if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
                return IRDMA_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].len;

        read_fence |= info->read_fence;

        if (info->imm_data_valid)
                frag_cnt = op_info->num_lo_sges + 1;
        else
                frag_cnt = op_info->num_lo_sges;
        addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
        ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
        if (ret_code)
                return ret_code;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
                                         info);
        if (!wqe)
                return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

        irdma_clr_wqes(qp, wqe_idx);

        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

        if (info->imm_data_valid) {
                set_64bit_val(wqe, 0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
                i = 0;
        } else {
                qp->wqe_ops.iw_set_fragment(wqe, 0,
                                            op_info->lo_sg_list,
                                            qp->swqe_polarity);
                i = 1;
        }

        for (byte_off = 32; i < op_info->num_lo_sges; i++) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off,
                                            &op_info->lo_sg_list[i],
                                            qp->swqe_polarity);
                byte_off += 16;
        }

        /* if not an odd number set valid bit in next fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
            frag_cnt) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->swqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }

        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
              FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
              FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
              FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
              FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);
        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_uk_rdma_read - rdma read command
 * @qp: hw qp ptr
 * @info: post sq information
 * @inv_stag: flag for inv_stag
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
                                          struct irdma_post_sq_info *info,
                                          bool inv_stag, bool post_sq)
{
        struct irdma_rdma_read *op_info;
        enum irdma_status_code ret_code;
        u32 i, byte_off, total_size = 0;
        bool local_fence = false;
        u32 addl_frag_cnt;
        __le64 *wqe;
        u32 wqe_idx;
        u16 quanta;
        u64 hdr;

        info->push_wqe = qp->push_db ? true : false;

        op_info = &info->op.rdma_read;
        if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
                return IRDMA_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_lo_sges; i++)
                total_size += op_info->lo_sg_list[i].len;

        ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
        if (ret_code)
                return ret_code;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
                                         info);
        if (!wqe)
                return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

        irdma_clr_wqes(qp, wqe_idx);

        addl_frag_cnt = op_info->num_lo_sges > 1 ?
                        (op_info->num_lo_sges - 1) : 0;
        local_fence |= info->local_fence;

        qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
                                    qp->swqe_polarity);
        for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off,
                                            &op_info->lo_sg_list[i],
                                            qp->swqe_polarity);
                byte_off += 16;
        }

        /* if not an odd number set valid bit in next fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
            !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->swqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }
        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
              FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
              FIELD_PREP(IRDMAQPSQ_OPCODE,
                         (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);
        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_uk_send - rdma send command
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
                                     struct irdma_post_sq_info *info,
                                     bool post_sq)
{
        __le64 *wqe;
        struct irdma_post_send *op_info;
        u64 hdr;
        u32 i, wqe_idx, total_size = 0, byte_off;
        enum irdma_status_code ret_code;
        u32 frag_cnt, addl_frag_cnt;
        bool read_fence = false;
        u16 quanta;

        info->push_wqe = qp->push_db ? true : false;

        op_info = &info->op.send;
        if (qp->max_sq_frag_cnt < op_info->num_sges)
                return IRDMA_ERR_INVALID_FRAG_COUNT;

        for (i = 0; i < op_info->num_sges; i++)
                total_size += op_info->sg_list[i].len;

        if (info->imm_data_valid)
                frag_cnt = op_info->num_sges + 1;
        else
                frag_cnt = op_info->num_sges;
        ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
        if (ret_code)
                return ret_code;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
                                         info);
        if (!wqe)
                return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

        irdma_clr_wqes(qp, wqe_idx);

        read_fence |= info->read_fence;
        addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
        if (info->imm_data_valid) {
                set_64bit_val(wqe, 0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
                i = 0;
        } else {
                qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
                                            qp->swqe_polarity);
                i = 1;
        }

        for (byte_off = 32; i < op_info->num_sges; i++) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
                                            qp->swqe_polarity);
                byte_off += 16;
        }

        /* if not an odd number set valid bit in next fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
            frag_cnt) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->swqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }

        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
                      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
              FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
              FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
                         (info->imm_data_valid ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
              FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
              FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);
        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
 * @wqe: wqe for setting fragment
 * @op_info: info for setting bind wqe values
 */
static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
                                        struct irdma_bind_window *op_info)
{
        set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
        set_64bit_val(wqe, 8,
                      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
                      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
        set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
 * @dest: pointer to wqe
 * @src: pointer to inline data
 * @len: length of inline data to copy
 * @polarity: compatibility parameter
 */
static void irdma_copy_inline_data_gen_1(u8 *dest, u8 *src, u32 len,
                                         u8 polarity)
{
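        /*
         * GEN_1 inline layout: up to 16 bytes of data live in the first
         * quanta and any remainder starts 32 bytes in, at the second
         * quanta. GEN_1 carries no per-quanta valid byte, so @polarity is
         * accepted only for interface compatibility.
         */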
        if (len <= 16) {
                memcpy(dest, src, len);
        } else {
                memcpy(dest, src, 16);
                src += 16;
                dest = dest + 32;
                memcpy(dest, src, len - 16);
        }
}

/**
 * irdma_inline_data_size_to_quanta_gen_1 - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
{
        return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
}

/**
 * irdma_set_mw_bind_wqe - set mw bind in wqe
 * @wqe: wqe for setting mw bind
 * @op_info: info for setting wqe values
 */
static void irdma_set_mw_bind_wqe(__le64 *wqe,
                                  struct irdma_bind_window *op_info)
{
        set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
        set_64bit_val(wqe, 8,
                      FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
                      FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
        set_64bit_val(wqe, 16, op_info->bind_len);
}

/**
 * irdma_copy_inline_data - Copy inline data to wqe
 * @dest: pointer to wqe
 * @src: pointer to inline data
 * @len: length of inline data to copy
 * @polarity: polarity of wqe valid bit
 */
static void irdma_copy_inline_data(u8 *dest, u8 *src, u32 len, u8 polarity)
{
        u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
        u32 copy_size;

        dest += 8;
        if (len <= 8) {
                memcpy(dest, src, len);
                return;
        }

        *((u64 *)dest) = *((u64 *)src);
        len -= 8;
        src += 8;
        dest += 24; /* point to additional 32 byte quanta */

        while (len) {
                copy_size = len < 31 ? len : 31;
                memcpy(dest, src, copy_size);
                *(dest + 31) = inline_valid;
                len -= copy_size;
                dest += 32;
                src += copy_size;
        }
}

/**
 * irdma_inline_data_size_to_quanta - based on inline data, quanta
 * @data_size: data size for inline
 *
 * Gets the quanta based on inline and immediate data.
 */
static u16 irdma_inline_data_size_to_quanta(u32 data_size)
{
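        /*
         * The first quanta holds 8 bytes of inline data after the WQE
         * header; each additional 32-byte quanta holds 31 data bytes plus
         * a trailing valid byte (see irdma_copy_inline_data()), hence the
         * thresholds step by 31: 8, 39, 70, 101, ...
         */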
        if (data_size <= 8)
                return IRDMA_QP_WQE_MIN_QUANTA;
        else if (data_size <= 39)
                return 2;
        else if (data_size <= 70)
                return 3;
        else if (data_size <= 101)
                return 4;
        else if (data_size <= 132)
                return 5;
        else if (data_size <= 163)
                return 6;
        else if (data_size <= 194)
                return 7;
        else
                return 8;
}

/**
 * irdma_uk_inline_rdma_write - inline rdma write operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code
irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
                           bool post_sq)
{
        __le64 *wqe;
        struct irdma_inline_rdma_write *op_info;
        u64 hdr = 0;
        u32 wqe_idx;
        bool read_fence = false;
        u16 quanta;

        info->push_wqe = qp->push_db ? true : false;
        op_info = &info->op.inline_rdma_write;

        if (op_info->len > qp->max_inline_data)
                return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;

        quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
                                         info);
        if (!wqe)
                return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

        irdma_clr_wqes(qp, wqe_idx);

        read_fence |= info->read_fence;
        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));

        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
              FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
              FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
              FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
              FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
              FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        if (info->imm_data_valid)
                set_64bit_val(wqe, 0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));

        qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
                                        qp->swqe_polarity);
        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_uk_inline_send - inline send operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
                                            struct irdma_post_sq_info *info,
                                            bool post_sq)
{
        __le64 *wqe;
        struct irdma_post_inline_send *op_info;
        u64 hdr;
        u32 wqe_idx;
        bool read_fence = false;
        u16 quanta;

        info->push_wqe = qp->push_db ? true : false;
        op_info = &info->op.inline_send;

        if (op_info->len > qp->max_inline_data)
                return IRDMA_ERR_INVALID_INLINE_DATA_SIZE;

        quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(op_info->len);
        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, op_info->len,
                                         info);
        if (!wqe)
                return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

        irdma_clr_wqes(qp, wqe_idx);

        set_64bit_val(wqe, 16,
                      FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
                      FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));

        read_fence |= info->read_fence;
        hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
              FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
              FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
              FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
              FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
                         (info->imm_data_valid ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
              FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        if (info->imm_data_valid)
                set_64bit_val(wqe, 0,
                              FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
        qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->data, op_info->len,
                                        qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_uk_stag_local_invalidate - stag invalidate operation
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code
irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
                               struct irdma_post_sq_info *info, bool post_sq)
{
        __le64 *wqe;
        struct irdma_inv_local_stag *op_info;
        u64 hdr;
        u32 wqe_idx;
        bool local_fence = false;
        struct irdma_sge sge = {};

        info->push_wqe = qp->push_db ? true : false;
        op_info = &info->op.inv_local_stag;
        local_fence = info->local_fence;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
                                         0, info);
        if (!wqe)
                return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

        irdma_clr_wqes(qp, wqe_idx);

        sge.stag = op_info->target_stag;
        qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);

        set_64bit_val(wqe, 16, 0);

        hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
                                  post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_uk_mw_bind - bind Memory Window
 * @qp: hw qp ptr
 * @info: post sq information
 * @post_sq: flag to post sq
 */
enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
                                        struct irdma_post_sq_info *info,
                                        bool post_sq)
{
        __le64 *wqe;
        struct irdma_bind_window *op_info;
        u64 hdr;
        u32 wqe_idx;
        bool local_fence = false;

        info->push_wqe = qp->push_db ? true : false;
        op_info = &info->op.bind_window;
        local_fence |= info->local_fence;

        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
                                         0, info);
        if (!wqe)
                return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

        irdma_clr_wqes(qp, wqe_idx);

        qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);

        hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
              FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
                         ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
              FIELD_PREP(IRDMAQPSQ_VABASEDTO,
                         (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
                         (op_info->mem_window_type_1 ? 1 : 0)) |
              FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
              FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
              FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        if (info->push_wqe) {
                irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
                                  post_sq);
        } else {
                if (post_sq)
                        irdma_uk_qp_post_wr(qp);
        }

        return 0;
}

/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
 */
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
                                             struct irdma_post_rq_info *info)
{
        u32 wqe_idx, i, byte_off;
        u32 addl_frag_cnt;
        __le64 *wqe;
        u64 hdr;

        if (qp->max_rq_frag_cnt < info->num_sges)
                return IRDMA_ERR_INVALID_FRAG_COUNT;

        wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
        if (!wqe)
                return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

        qp->rq_wrid_array[wqe_idx] = info->wr_id;
        addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
        qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
                                    qp->rwqe_polarity);

        for (i = 1, byte_off = 32; i < info->num_sges; i++) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
                                            qp->rwqe_polarity);
                byte_off += 16;
        }

        /* if not an odd number set valid bit in next fragment */
        if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
            info->num_sges) {
                qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
                                            qp->rwqe_polarity);
                if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
                        ++addl_frag_cnt;
        }

        set_64bit_val(wqe, 16, 0);
        hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);

        return 0;
}

/**
 * irdma_uk_cq_resize - reset the cq buffer info
 * @cq: cq to resize
 * @cq_base: new cq buffer addr
 * @cq_size: number of cqes
 */
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
{
        cq->cq_base = cq_base;
        cq->cq_size = cq_size;
        IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
        cq->polarity = 1;
}

/**
 * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
 * @cq: cq to resize
 * @cq_cnt: the count of the resized cq buffers
 */
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se;
        u8 arm_next;
        u8 arm_seq_num;

        get_64bit_val(cq->shadow_area, 32, &temp_val);

        sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
        sw_cq_sel += cq_cnt;

        arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
        arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
        arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);

        temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
                   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
                   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
                   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

        set_64bit_val(cq->shadow_area, 32, temp_val);
}

/**
 * irdma_uk_cq_request_notification - cq notification request (door bell)
 * @cq: hw cq
 * @cq_notify: notification type
 */
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
                                      enum irdma_cmpl_notify cq_notify)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se = 0;
        u8 arm_next = 0;
        u8 arm_seq_num;

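        /*
         * The CQ is armed through its shadow area: ARM_NEXT_SE is always
         * set so a solicited completion raises an event, while ARM_NEXT is
         * additionally set for IRDMA_CQ_COMPL_EVENT so any completion
         * does. arm_seq_num is bumped on every request so the hardware can
         * tell a fresh arm from a stale one.
         */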
        get_64bit_val(cq->shadow_area, 32, &temp_val);
        arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
        arm_seq_num++;
        sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
        arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
        arm_next_se |= 1;
        if (cq_notify == IRDMA_CQ_COMPL_EVENT)
                arm_next = 1;
        temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
                   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
                   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
                   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);

        set_64bit_val(cq->shadow_area, 32, temp_val);

        dma_wmb(); /* make sure shadow area is updated before ringing doorbell */

        writel(cq->cq_id, cq->cqe_alloc_db);
}

/**
 * irdma_uk_cq_poll_cmpl - get cq completion info
 * @cq: hw cq
 * @info: cq poll information returned
 */
enum irdma_status_code
irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
{
        u64 comp_ctx, qword0, qword2, qword3;
        __le64 *cqe;
        struct irdma_qp_uk *qp;
        struct irdma_ring *pring = NULL;
        u32 wqe_idx, q_type;
        enum irdma_status_code ret_code;
        bool move_cq_head = true;
        u8 polarity;
        bool ext_valid;
        __le64 *ext_cqe;

        if (cq->avoid_mem_cflct)
                cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
        else
                cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);

        get_64bit_val(cqe, 24, &qword3);
        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
        if (polarity != cq->polarity)
                return IRDMA_ERR_Q_EMPTY;

        /* Ensure CQE contents are read after valid bit is checked */
        dma_rmb();

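        /*
         * An extended CQE occupies a second 32-byte slot carrying UD and
         * immediate-data metadata; its own valid bit must pass the same
         * polarity check before the extra qwords can be trusted.
         */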
        ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
        if (ext_valid) {
                u64 qword6, qword7;
                u32 peek_head;

                if (cq->avoid_mem_cflct) {
                        ext_cqe = (__le64 *)((u8 *)cqe + 32);
                        get_64bit_val(ext_cqe, 24, &qword7);
                        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
                } else {
                        peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
                        ext_cqe = cq->cq_base[peek_head].buf;
                        get_64bit_val(ext_cqe, 24, &qword7);
                        polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
                        if (!peek_head)
                                polarity ^= 1;
                }
                if (polarity != cq->polarity)
                        return IRDMA_ERR_Q_EMPTY;

                /* Ensure ext CQE contents are read after ext valid bit is checked */
                dma_rmb();

                info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
                if (info->imm_valid) {
                        u64 qword4;

                        get_64bit_val(ext_cqe, 0, &qword4);
                        info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
                }
                info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
                info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
                if (info->ud_smac_valid || info->ud_vlan_valid) {
                        get_64bit_val(ext_cqe, 16, &qword6);
                        if (info->ud_vlan_valid)
                                info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
                        if (info->ud_smac_valid) {
                                info->ud_smac[5] = qword6 & 0xFF;
                                info->ud_smac[4] = (qword6 >> 8) & 0xFF;
                                info->ud_smac[3] = (qword6 >> 16) & 0xFF;
                                info->ud_smac[2] = (qword6 >> 24) & 0xFF;
                                info->ud_smac[1] = (qword6 >> 32) & 0xFF;
                                info->ud_smac[0] = (qword6 >> 40) & 0xFF;
                        }
                }
        } else {
                info->imm_valid = false;
                info->ud_smac_valid = false;
                info->ud_vlan_valid = false;
        }

        q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
        info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
        info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
        info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
        if (info->error) {
                info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
                info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
                if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
                        info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
                        /* Set the min error to standard flush error code for remaining cqes */
                        if (info->minor_err != FLUSH_GENERAL_ERR) {
                                qword3 &= ~IRDMA_CQ_MINERR;
                                qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
                                set_64bit_val(cqe, 24, qword3);
                        }
                } else {
                        info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
                }
        } else {
                info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
        }

        get_64bit_val(cqe, 0, &qword0);
        get_64bit_val(cqe, 16, &qword2);

        info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
        info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
        info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);

        get_64bit_val(cqe, 8, &comp_ctx);

        info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
        qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
        if (!qp || qp->destroy_pending) {
                ret_code = IRDMA_ERR_Q_DESTROYED;
                goto exit;
        }
        wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
        info->qp_handle = (irdma_qp_handle)(unsigned long)qp;

        if (q_type == IRDMA_CQE_QTYPE_RQ) {
                u32 array_idx;

                array_idx = wqe_idx / qp->rq_wqe_size_multiplier;

                if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
                    info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
                        if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
                                ret_code = IRDMA_ERR_Q_EMPTY;
                                goto exit;
                        }

                        info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
                        array_idx = qp->rq_ring.tail;
                } else {
                        info->wr_id = qp->rq_wrid_array[array_idx];
                }

                info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);

                if (info->imm_valid)
                        info->op_type = IRDMA_OP_TYPE_REC_IMM;
                else
                        info->op_type = IRDMA_OP_TYPE_REC;
                if (qword3 & IRDMACQ_STAG) {
                        info->stag_invalid_set = true;
                        info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
                } else {
                        info->stag_invalid_set = false;
                }
                IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
                if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
                        qp->rq_flush_seen = true;
                        if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
                                qp->rq_flush_complete = true;
                        else
                                move_cq_head = false;
                }
                pring = &qp->rq_ring;
        } else { /* q_type is IRDMA_CQE_QTYPE_SQ */
                if (qp->first_sq_wq) {
                        if (wqe_idx + 1 >= qp->conn_wqes)
                                qp->first_sq_wq = false;

                        if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
                                IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
                                IRDMA_RING_MOVE_TAIL(cq->cq_ring);
                                set_64bit_val(cq->shadow_area, 0,
                                              IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
                                memset(info, 0,
                                       sizeof(struct irdma_cq_poll_info));
                                return irdma_uk_cq_poll_cmpl(cq, info);
                        }
                }
                /* cease posting push mode on push drop */
                if (info->push_dropped) {
                        qp->push_mode = false;
                        qp->push_dropped = true;
                }
                if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
                        info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
                        if (!info->comp_status)
                                info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
                        info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
                        IRDMA_RING_SET_TAIL(qp->sq_ring,
                                            wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
                } else {
                        if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
                                ret_code = IRDMA_ERR_Q_EMPTY;
                                goto exit;
                        }

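                        /*
                         * A flushed SQ completion does not identify the
                         * failed WQE; walk the SQ from the tail, skipping
                         * NOP padding, to find the WR to report.
                         */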
                        do {
                                __le64 *sw_wqe;
                                u64 wqe_qword;
                                u8 op_type;
                                u32 tail;

                                tail = qp->sq_ring.tail;
                                sw_wqe = qp->sq_base[tail].elem;
                                get_64bit_val(sw_wqe, 24,
                                              &wqe_qword);
                                op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
                                info->op_type = op_type;
                                IRDMA_RING_SET_TAIL(qp->sq_ring,
                                                    tail + qp->sq_wrtrk_array[tail].quanta);
                                if (op_type != IRDMAQP_OP_NOP) {
                                        info->wr_id = qp->sq_wrtrk_array[tail].wrid;
                                        info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
                                        break;
                                }
                        } while (1);
                        qp->sq_flush_seen = true;
                        if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
                                qp->sq_flush_complete = true;
                }
                pring = &qp->sq_ring;
        }

        ret_code = 0;

exit:
        if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
                if (pring && IRDMA_RING_MORE_WORK(*pring))
                        move_cq_head = false;

        if (move_cq_head) {
                IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
                if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
                        cq->polarity ^= 1;

                if (ext_valid && !cq->avoid_mem_cflct) {
                        IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
                        if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
                                cq->polarity ^= 1;
                }

                IRDMA_RING_MOVE_TAIL(cq->cq_ring);
                if (!cq->avoid_mem_cflct && ext_valid)
                        IRDMA_RING_MOVE_TAIL(cq->cq_ring);
                set_64bit_val(cq->shadow_area, 0,
                              IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
        } else {
                qword3 &= ~IRDMA_CQ_WQEIDX;
                qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
                set_64bit_val(cqe, 24, qword3);
        }

        return ret_code;
}

/**
 * irdma_qp_round_up - return round up qp wq depth
 * @wqdepth: wq depth in quanta to round up
 */
static int irdma_qp_round_up(u32 wqdepth)
{
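        /* Classic round-up-to-next-power-of-two: decrement, smear the
         * highest set bit into every lower position, then add one.
         */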
        int scount = 1;

        for (wqdepth--; scount <= 16; scount *= 2)
                wqdepth |= wqdepth >> scount;

        return ++wqdepth;
}

/**
 * irdma_get_wqe_shift - get shift count for maximum wqe size
 * @uk_attrs: qp HW attributes
 * @sge: Maximum Scatter Gather Elements wqe
 * @inline_data: Maximum inline data size
 * @shift: Returns the shift needed based on sge
 *
 * Shift can be used to left shift the wqe size based on the number of SGEs and
 * inline data size. For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32
 * bytes). For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of 64
 * bytes). For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of 128
 * bytes). Otherwise, shift = 3 (wqe size of 256 bytes).
 */
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
                         u32 inline_data, u8 *shift)
{
        *shift = 0;
        if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
                if (sge > 1 || inline_data > 8) {
                        if (sge < 4 && inline_data <= 39)
                                *shift = 1;
                        else if (sge < 8 && inline_data <= 101)
                                *shift = 2;
                        else
                                *shift = 3;
                }
        } else if (sge > 1 || inline_data > 16) {
                *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
        }
}

/*
 * irdma_get_sqdepth - get SQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @sq_size: SQ size
 * @shift: shift which determines size of WQE
 * @sqdepth: depth of SQ
 */
enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs,
                                         u32 sq_size, u8 shift, u32 *sqdepth)
{
        *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);

        if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
                *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
        else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
                return IRDMA_ERR_INVALID_SIZE;

        return 0;
}

/*
 * irdma_get_rqdepth - get RQ depth (quanta)
 * @uk_attrs: qp HW attributes
 * @rq_size: RQ size
 * @shift: shift which determines size of WQE
 * @rqdepth: depth of RQ
 */
enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs,
                                         u32 rq_size, u8 shift, u32 *rqdepth)
{
        *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);

        if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
                *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
        else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
                return IRDMA_ERR_INVALID_SIZE;

        return 0;
}

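/* WQE build routines; the _gen_1 variants are selected for GEN_1 hardware */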
1396 static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
1397         .iw_copy_inline_data = irdma_copy_inline_data,
1398         .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
1399         .iw_set_fragment = irdma_set_fragment,
1400         .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
1401 };
1402
1403 static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
1404         .iw_copy_inline_data = irdma_copy_inline_data_gen_1,
1405         .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
1406         .iw_set_fragment = irdma_set_fragment_gen_1,
1407         .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
1408 };
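
/*
 * A brief note (annotation): irdma_uk_qp_init() below selects one of
 * these tables by hardware generation, so the WQE-building paths can
 * dispatch through qp->wqe_ops instead of branching on hw_rev, e.g.:
 *
 *        qp->wqe_ops.iw_set_fragment(wqe, 0, sge, qp->swqe_polarity);
 */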

/**
 * irdma_setup_connection_wqes - setup WQEs necessary to complete connection
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 */
static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
                                        struct irdma_qp_uk_init_info *info)
{
        u16 move_cnt = 1;

        if (!info->legacy_mode &&
            (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
                move_cnt = 3;

        qp->conn_wqes = move_cnt;
        IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
        IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
        IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
 * irdma_uk_qp_init - initialize shared qp
 * @qp: hw qp (user and kernel)
 * @info: qp initialization info
 *
 * Initializes the variables used in both user and kernel mode. The
 * size of the wqe depends on the maximum number of fragments allowed,
 * and the size of the wqe times the number of wqes must match the
 * amount of memory allocated for the sq and rq.
 */
enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
                                        struct irdma_qp_uk_init_info *info)
{
        enum irdma_status_code ret_code = 0;
        u32 sq_ring_size;
        u8 sqshift, rqshift;

        qp->uk_attrs = info->uk_attrs;
        if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
            info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
                return IRDMA_ERR_INVALID_FRAG_COUNT;

        irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
        if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
                irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
                                    info->max_inline_data, &sqshift);
                if (info->abi_ver > 4)
                        rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
        } else {
                irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
                                    info->max_inline_data, &sqshift);
        }
        qp->qp_caps = info->qp_caps;
        qp->sq_base = info->sq;
        qp->rq_base = info->rq;
        qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
        qp->shadow_area = info->shadow_area;
        qp->sq_wrtrk_array = info->sq_wrtrk_array;
        qp->rq_wrid_array = info->rq_wrid_array;
        qp->wqe_alloc_db = info->wqe_alloc_db;
        qp->qp_id = info->qp_id;
        qp->sq_size = info->sq_size;
        qp->push_mode = false;
        qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
        sq_ring_size = qp->sq_size << sqshift;
        IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
        IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
        if (info->first_sq_wq) {
                irdma_setup_connection_wqes(qp, info);
                qp->swqe_polarity = 1;
                qp->first_sq_wq = true;
        } else {
                qp->swqe_polarity = 0;
        }
        qp->swqe_polarity_deferred = 1;
        qp->rwqe_polarity = 0;
        qp->rq_size = info->rq_size;
        qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
        qp->max_inline_data = info->max_inline_data;
        qp->rq_wqe_size = rqshift;
        IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
        qp->rq_wqe_size_multiplier = 1 << rqshift;
        if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
                qp->wqe_ops = iw_wqe_uk_ops_gen_1;
        else
                qp->wqe_ops = iw_wqe_uk_ops;
        return ret_code;
}
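
/*
 * A hedged usage sketch (annotation; the locals and field choices are
 * illustrative, not a verbatim caller): a QP owner allocates the ring
 * memory and tracking arrays itself, describes them in the init info,
 * and lets irdma_uk_qp_init() wire up the shared state:
 *
 *        struct irdma_qp_uk_init_info info = {};
 *
 *        info.uk_attrs = uk_attrs;
 *        info.sq = sq_mem;
 *        info.rq = rq_mem;
 *        info.sq_size = sq_quanta >> sqshift;
 *        info.rq_size = rq_quanta >> rqshift;
 *        info.max_sq_frag_cnt = max_send_sge;
 *        info.max_rq_frag_cnt = max_recv_sge;
 *        info.sq_wrtrk_array = sq_wr_trk;
 *        info.rq_wrid_array = rq_wrid;
 *        info.wqe_alloc_db = db_page;
 *        info.qp_id = qp_id;
 *
 *        if (irdma_uk_qp_init(qp, &info))
 *                goto err_qp;
 */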

/**
 * irdma_uk_cq_init - initialize shared cq (user and kernel)
 * @cq: hw cq
 * @info: hw cq initialization info
 */
enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
                                        struct irdma_cq_uk_init_info *info)
{
        cq->cq_base = info->cq_base;
        cq->cq_id = info->cq_id;
        cq->cq_size = info->cq_size;
        cq->cqe_alloc_db = info->cqe_alloc_db;
        cq->cq_ack_db = info->cq_ack_db;
        cq->shadow_area = info->shadow_area;
        cq->avoid_mem_cflct = info->avoid_mem_cflct;
        IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
        cq->polarity = 1;

        return 0;
}

/**
 * irdma_uk_clean_cq - clean cq entries
 * @q: completion context
 * @cq: cq to clean
 */
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
{
        __le64 *cqe;
        u64 qword3, comp_ctx;
        u32 cq_head;
        u8 polarity, temp;

        cq_head = cq->cq_ring.head;
        temp = cq->polarity;
        do {
                if (cq->avoid_mem_cflct)
                        cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
                else
                        cqe = cq->cq_base[cq_head].buf;
                get_64bit_val(cqe, 24, &qword3);
                polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);

                if (polarity != temp)
                        break;

                get_64bit_val(cqe, 8, &comp_ctx);
                if ((void *)(unsigned long)comp_ctx == q)
                        set_64bit_val(cqe, 8, 0);

                cq_head = (cq_head + 1) % cq->cq_ring.size;
                if (!cq_head)
                        temp ^= 1;
        } while (true);
}
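
/*
 * A brief note (annotation): "q" is matched against the completion
 * context stored in qword 2 (offset 8) of each valid CQE, which is
 * what the loop above reads back. A caller tearing down a QP would
 * pass that QP's user-kernel context so stale CQEs stop pointing at
 * it, e.g. (hypothetical locals):
 *
 *        irdma_uk_clean_cq(&qp->qp_uk, &cq->cq_uk);
 */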

/**
 * irdma_nop - post a nop
 * @qp: hw qp ptr
 * @wr_id: work request id
 * @signaled: signaled for completion
 * @post_sq: ring doorbell
 */
enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id,
                                 bool signaled, bool post_sq)
{
        __le64 *wqe;
        u64 hdr;
        u32 wqe_idx;
        struct irdma_post_sq_info info = {};

        info.push_wqe = false;
        info.wr_id = wr_id;
        wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
                                         0, &info);
        if (!wqe)
                return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;

        irdma_clr_wqes(qp, wqe_idx);

        set_64bit_val(wqe, 0, 0);
        set_64bit_val(wqe, 8, 0);
        set_64bit_val(wqe, 16, 0);

        hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
              FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
              FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);

        dma_wmb(); /* make sure WQE is populated before valid bit is set */

        set_64bit_val(wqe, 24, hdr);
        if (post_sq)
                irdma_uk_qp_post_wr(qp);

        return 0;
}
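
/*
 * A minimal example (annotation): padding the SQ with one unsignaled
 * NOP and ringing the doorbell immediately:
 *
 *        irdma_nop(qp, 0, false, true);
 */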

/**
 * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
 * @frag_cnt: number of fragments
 * @quanta: quanta for frag_cnt
 */
enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *quanta = IRDMA_QP_WQE_MIN_QUANTA;
                break;
        case 2:
        case 3:
                *quanta = 2;
                break;
        case 4:
        case 5:
                *quanta = 3;
                break;
        case 6:
        case 7:
                *quanta = 4;
                break;
        case 8:
        case 9:
                *quanta = 5;
                break;
        case 10:
        case 11:
                *quanta = 6;
                break;
        case 12:
        case 13:
                *quanta = 7;
                break;
        case 14:
        case 15: /* when immediate data is present */
                *quanta = 8;
                break;
        default:
                return IRDMA_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}
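
/*
 * A compact observation (annotation; assumes IRDMA_QP_WQE_MIN_QUANTA
 * is one quantum): the table above reduces to
 *
 *        *quanta = frag_cnt / 2 + 1;        for frag_cnt <= 15
 *
 * because the first 32-byte quantum carries the WQE header plus one
 * 16-byte fragment, and every additional quantum holds two more.
 */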

/**
 * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
 * @frag_cnt: number of fragments
 * @wqe_size: size in bytes given frag_cnt
 */
enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
{
        switch (frag_cnt) {
        case 0:
        case 1:
                *wqe_size = 32;
                break;
        case 2:
        case 3:
                *wqe_size = 64;
                break;
        case 4:
        case 5:
        case 6:
        case 7:
                *wqe_size = 128;
                break;
        case 8:
        case 9:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
                *wqe_size = 256;
                break;
        default:
                return IRDMA_ERR_INVALID_FRAG_COUNT;
        }

        return 0;
}
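
/*
 * A worked check (annotation): the first fragment shares the initial
 * 32-byte quantum with the WQE header and each additional fragment
 * takes 16 bytes, with the total rounded up to a power-of-two WQE
 * size. For frag_cnt = 7 that is 32 + 6 * 16 = 128 bytes, matching
 * the table above.
 */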