[linux-2.6-microblaze.git] / drivers / infiniband / hw / i40iw / i40iw_ctrl.c
1 /*******************************************************************************
2 *
3 * Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenFabrics.org BSD license below:
10 *
11 *   Redistribution and use in source and binary forms, with or
12 *   without modification, are permitted provided that the following
13 *   conditions are met:
14 *
15 *    - Redistributions of source code must retain the above
16 *       copyright notice, this list of conditions and the following
17 *       disclaimer.
18 *
19 *    - Redistributions in binary form must reproduce the above
20 *       copyright notice, this list of conditions and the following
21 *       disclaimer in the documentation and/or other materials
22 *       provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 *******************************************************************************/
34
35 #include "i40iw_osdep.h"
36 #include "i40iw_register.h"
37 #include "i40iw_status.h"
38 #include "i40iw_hmc.h"
39
40 #include "i40iw_d.h"
41 #include "i40iw_type.h"
42 #include "i40iw_p.h"
43 #include "i40iw_vf.h"
44 #include "i40iw_virtchnl.h"
45
/**
 * i40iw_insert_wqe_hdr - write wqe header
 * @wqe: cqp wqe for header
 * @header: header for the cqp wqe
 *
 * The header qword (offset 24) carries the WQE valid/polarity bit, so it
 * must be made visible only after the rest of the WQE is written; the
 * barrier below enforces that ordering.
 */
void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
	wmb();            /* make sure WQE is populated before polarity is set */
	set_64bit_val(wqe, 24, header);
}
56
57 void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev)
58 {
59         if (cqp_timeout->compl_cqp_cmds != dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]) {
60                 cqp_timeout->compl_cqp_cmds = dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS];
61                 cqp_timeout->count = 0;
62         } else {
63                 if (dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] != cqp_timeout->compl_cqp_cmds)
64                         cqp_timeout->count++;
65         }
66 }
67
68 /**
69  * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
70  * @cqp: struct for cqp hw
71  * @val: cqp tail register value
72  * @tail:wqtail register value
73  * @error: cqp processing err
74  */
75 static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
76                                           u32 *val,
77                                           u32 *tail,
78                                           u32 *error)
79 {
80         if (cqp->dev->is_pf) {
81                 *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
82                 *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
83                 *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
84         } else {
85                 *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
86                 *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
87                 *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
88         }
89 }
90
91 /**
92  * i40iw_cqp_poll_registers - poll cqp registers
93  * @cqp: struct for cqp hw
94  * @tail:wqtail register value
95  * @count: how many times to try for completion
96  */
97 static enum i40iw_status_code i40iw_cqp_poll_registers(
98                                                 struct i40iw_sc_cqp *cqp,
99                                                 u32 tail,
100                                                 u32 count)
101 {
102         u32 i = 0;
103         u32 newtail, error, val;
104
105         while (i < count) {
106                 i++;
107                 i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
108                 if (error) {
109                         error = (cqp->dev->is_pf) ?
110                                  i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
111                                  i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
112                         return I40IW_ERR_CQP_COMPL_ERROR;
113                 }
114                 if (newtail != tail) {
115                         /* SUCCESS */
116                         I40IW_RING_MOVE_TAIL(cqp->sq_ring);
117                         cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
118                         return 0;
119                 }
120                 udelay(I40IW_SLEEP_COUNT);
121         }
122         return I40IW_ERR_TIMEOUT;
123 }
124
/**
 * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
 * @buf: ptr to fpm commit buffer
 * @info: ptr to i40iw_hmc_obj_info struct
 * @sd: number of SDs for HMC objects
 *
 * parses fpm commit info and copy base value
 * of hmc objects in hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
				u64 *buf,
				struct i40iw_hmc_obj_info *info,
				u32 *sd)
{
	u64 temp;
	u64 size;
	u64 base = 0;
	u32 i, j;
	u32 k = 0;	/* index of the object with the highest base address */

	/* copy base values in obj_info */
	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
		/* SRQ, FSIMC and FSIAV objects are unused: zero them out */
		if ((i == I40IW_HMC_IW_SRQ) ||
			(i == I40IW_HMC_IW_FSIMC) ||
			(i == I40IW_HMC_IW_FSIAV)) {
			info[i].base = 0;
			info[i].cnt = 0;
			continue;
		}
		get_64bit_val(buf, j, &temp);
		/* upper 32 bits of the qword hold the base in 512-byte units */
		info[i].base = RS_64_1(temp, 32) * 512;
		if (info[i].base > base) {
			base = info[i].base;
			k = i;
		}
		if (i == I40IW_HMC_IW_APBVT_ENTRY) {
			/* APBVT is a single fixed-size table */
			info[i].cnt = 1;
			continue;
		}
		if (i == I40IW_HMC_IW_QP)
			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
		else if (i == I40IW_HMC_IW_CQ)
			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
		else
			info[i].cnt = (u32)(temp);
	}
	/* total footprint ends at the highest-based object */
	size = info[k].cnt * info[k].size + info[k].base;
	/* SDs are 2MB (1 << 21); round up when there is a remainder */
	if (size & 0x1FFFFF)
		*sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
	else
		*sd = (u32)(size >> 21);

	return 0;
}
179
180 /**
181  * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
182  * @buf: ptr to fpm query buffer
183  * @buf_idx: index into buf
184  * @info: ptr to i40iw_hmc_obj_info struct
185  * @rsrc_idx: resource index into info
186  *
187  * Decode a 64 bit value from fpm query buffer into max count and size
188  */
189 static u64 i40iw_sc_decode_fpm_query(u64 *buf,
190                                             u32 buf_idx,
191                                             struct i40iw_hmc_obj_info *obj_info,
192                                             u32 rsrc_idx)
193 {
194         u64 temp;
195         u32 size;
196
197         get_64bit_val(buf, buf_idx, &temp);
198         obj_info[rsrc_idx].max_cnt = (u32)temp;
199         size = (u32)RS_64_1(temp, 32);
200         obj_info[rsrc_idx].size = LS_64_1(1, size);
201
202         return temp;
203 }
204
/**
 * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
 * @buf: ptr to fpm query buffer
 * @hmc_info: ptr to i40iw_hmc_info struct whose object table is filled in
 * @hmc_fpm_misc: ptr to fpm data
 *
 * parses fpm query buffer and copy max_cnt and
 * size value of hmc objects in hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
				u64 *buf,
				struct i40iw_hmc_info *hmc_info,
				struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
	struct i40iw_hmc_obj_info *obj_info;
	u64 temp;
	u32 size;
	u16 max_pe_sds;

	obj_info = hmc_info->hmc_obj;

	/* qword 0: first PE SD index and number of PE SDs */
	get_64bit_val(buf, 0, &temp);
	hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
	max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);

	/* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
	if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
		max_pe_sds--;
	hmc_fpm_misc->max_sds = max_pe_sds;
	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;

	/* qword 8: QP count (field-masked) and size exponent */
	get_64bit_val(buf, 8, &temp);
	obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
	size = (u32)RS_64_1(temp, 32);
	obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);

	/* qword 16: CQ count (field-masked) and size exponent */
	get_64bit_val(buf, 16, &temp);
	obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
	size = (u32)RS_64_1(temp, 32);
	obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);

	i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
	i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);

	/* APBVT is a fixed 8KB single-entry table */
	obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
	obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;

	i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
	i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);

	/* qword 64: XF free list count plus its block size */
	get_64bit_val(buf, 64, &temp);
	obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
	obj_info[I40IW_HMC_IW_XFFL].size = 4;
	hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
	if (!hmc_fpm_misc->xf_block_size)
		return I40IW_ERR_INVALID_SIZE;

	i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);

	/* qword 80: Q1 free list count plus its block size */
	get_64bit_val(buf, 80, &temp);
	obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
	obj_info[I40IW_HMC_IW_Q1FL].size = 4;
	hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
	if (!hmc_fpm_misc->q1_block_size)
		return I40IW_ERR_INVALID_SIZE;

	i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);

	/* qword 112: PBLE count; entries are 8 bytes each */
	get_64bit_val(buf, 112, &temp);
	obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
	obj_info[I40IW_HMC_IW_PBLE].size = 8;

	/* qword 120: misc limits */
	get_64bit_val(buf, 120, &temp);
	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);

	return 0;
}
284
285 /**
286  * i40iw_fill_qos_list - Change all unknown qs handles to available ones
287  * @qs_list: list of qs_handles to be fixed with valid qs_handles
288  */
289 static void i40iw_fill_qos_list(u16 *qs_list)
290 {
291         u16 qshandle = qs_list[0];
292         int i;
293
294         for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
295                 if (qs_list[i] == QS_HANDLE_UNKNOWN)
296                         qs_list[i] = qshandle;
297                 else
298                         qshandle = qs_list[i];
299         }
300 }
301
302 /**
303  * i40iw_qp_from_entry - Given entry, get to the qp structure
304  * @entry: Points to list of qp structure
305  */
306 static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
307 {
308         if (!entry)
309                 return NULL;
310
311         return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
312 }
313
314 /**
315  * i40iw_get_qp - get the next qp from the list given current qp
316  * @head: Listhead of qp's
317  * @qp: current qp
318  */
319 static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
320 {
321         struct list_head *entry = NULL;
322         struct list_head *lastentry;
323
324         if (list_empty(head))
325                 return NULL;
326
327         if (!qp) {
328                 entry = head->next;
329         } else {
330                 lastentry = &qp->list;
331                 entry = (lastentry != head) ? lastentry->next : NULL;
332         }
333
334         return i40iw_qp_from_entry(entry);
335 }
336
/**
 * i40iw_change_l2params - given the new l2 parameters, change all qp
 * @vsi: pointer to the vsi structure
 * @l2params: New paramaters from l2
 *
 * Applies a new MTU (re-initializing the ieq) and new qs_handles; any qp
 * on a priority whose handle changed is suspended via cqp so it picks up
 * the new handle.
 */
void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
{
	struct i40iw_sc_dev *dev = vsi->dev;
	struct i40iw_sc_qp *qp = NULL;
	bool qs_handle_change = false;
	unsigned long flags;
	u16 qs_handle;
	int i;

	if (vsi->mtu != l2params->mtu) {
		vsi->mtu = l2params->mtu;
		i40iw_reinitialize_ieq(dev);
	}

	/* replace any unknown handles before applying the list */
	i40iw_fill_qos_list(l2params->qs_handle_list);
	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
		qs_handle = l2params->qs_handle_list[i];
		/* NOTE(review): qs_handle_change is never reset between
		 * priorities, so one changed handle suspends qps on every
		 * subsequent priority too — confirm this is intentional.
		 */
		if (vsi->qos[i].qs_handle != qs_handle)
			qs_handle_change = true;
		spin_lock_irqsave(&vsi->qos[i].lock, flags);
		/* qp is NULL on entry to each priority (loop below drains it) */
		qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
		while (qp) {
			if (qs_handle_change) {
				qp->qs_handle = qs_handle;
				/* issue cqp suspend command */
				i40iw_qp_suspend_resume(dev, qp, true);
			}
			qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
		}
		spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
		vsi->qos[i].qs_handle = qs_handle;
	}
}
375
376 /**
377  * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
378  * @qp: qp to be removed from qos
379  */
380 void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
381 {
382         struct i40iw_sc_vsi *vsi = qp->vsi;
383         unsigned long flags;
384
385         if (!qp->on_qoslist)
386                 return;
387         spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
388         list_del(&qp->list);
389         spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
390 }
391
/**
 * i40iw_qp_add_qos - called during setctx fot qp to be added to qos
 * @qp: qp to be added to qos
 *
 * Adds the qp to the qos list of its user priority and caches that
 * priority's qs_handle on the qp. No-op if the qp is already listed.
 */
void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
{
	struct i40iw_sc_vsi *vsi = qp->vsi;
	unsigned long flags;

	if (qp->on_qoslist)
		return;
	spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
	qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
	/* link before setting the flag: on_qoslist is read without the
	 * lock in i40iw_qp_rem_qos, which assumes the qp is linked
	 */
	list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
	qp->on_qoslist = true;
	spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
}
409
410 /**
411  * i40iw_sc_pd_init - initialize sc pd struct
412  * @dev: sc device struct
413  * @pd: sc pd ptr
414  * @pd_id: pd_id for allocated pd
415  * @abi_ver: ABI version from user context, -1 if not valid
416  */
417 static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
418                              struct i40iw_sc_pd *pd,
419                              u16 pd_id,
420                              int abi_ver)
421 {
422         pd->size = sizeof(*pd);
423         pd->pd_id = pd_id;
424         pd->abi_ver = abi_ver;
425         pd->dev = dev;
426 }
427
428 /**
429  * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
430  * @wqsize: size of the wq (sq, rq, srq) to encoded_size
431  * @cqpsq: encoded size for sq for cqp as its encoded size is 1+ other wq's
432  */
433 u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
434 {
435         u8 encoded_size = 0;
436
437         /* cqp sq's hw coded value starts from 1 for size of 4
438          * while it starts from 0 for qp' wq's.
439          */
440         if (cqpsq)
441                 encoded_size = 1;
442         wqsize >>= 2;
443         while (wqsize >>= 1)
444                 encoded_size++;
445         return encoded_size;
446 }
447
/**
 * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
 * @cqp: IWARP control queue pair pointer
 * @info: IWARP control queue pair init info pointer
 *
 * Initializes the object and context buffers for a control Queue Pair.
 */
static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
						struct i40iw_cqp_init_info *info)
{
	u8 hw_sq_size;

	/* SQ size must be a power of two within [SQSIZE_4, SQSIZE_2048] */
	if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
	    (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
	    ((info->sq_size & (info->sq_size - 1))))
		return I40IW_ERR_INVALID_SIZE;

	hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
	cqp->size = sizeof(*cqp);
	cqp->sq_size = info->sq_size;
	cqp->hw_sq_size = hw_sq_size;
	cqp->sq_base = info->sq;
	cqp->host_ctx = info->host_ctx;
	cqp->sq_pa = info->sq_pa;
	cqp->host_ctx_pa = info->host_ctx_pa;
	cqp->dev = info->dev;
	cqp->struct_ver = info->struct_ver;
	cqp->scratch_array = info->scratch_array;
	cqp->polarity = 0;
	cqp->en_datacenter_tcp = info->en_datacenter_tcp;
	cqp->enabled_vf_count = info->enabled_vf_count;
	cqp->hmc_profile = info->hmc_profile;
	info->dev->cqp = cqp;

	I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
	cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
	INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);               /* for the cqp commands backlog. */

	/* NOTE(review): these PF tail/doorbell resets are issued even when
	 * !is_pf; confirm the VF equivalents are handled elsewhere.
	 */
	i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);
	i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);

	i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
		    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
		    __func__, cqp->sq_size, cqp->hw_sq_size,
		    cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
	return 0;
}
496
/**
 * i40iw_sc_cqp_create - create cqp during bringup
 * @cqp: struct for cqp hw
 * @maj_err: If error, major err number
 * @min_err: If error, minor err number
 *
 * Writes the cqp host context to hardware and polls CCQPSTATUS until the
 * device reports the cqp is up, or times out. On timeout the error-code
 * registers are decoded into @maj_err/@min_err.
 */
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
						  u16 *maj_err,
						  u16 *min_err)
{
	u64 temp;
	u32 cnt = 0, p1, p2, val = 0, err_code;
	enum i40iw_status_code ret_code;

	*maj_err = 0;
	*min_err = 0;

	/* scratch DMA buffer used later for SD update commands */
	ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
					  &cqp->sdbuf,
					  128,
					  I40IW_SD_BUF_ALIGNMENT);

	if (ret_code)
		goto exit;

	/* build the cqp host context: size/version, sq address, vf/profile */
	temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
	       LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);

	set_64bit_val(cqp->host_ctx, 0, temp);
	set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
	temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
	       LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
	set_64bit_val(cqp->host_ctx, 16, temp);
	/* back-pointer to this cqp, returned in ccq completions */
	set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
	set_64bit_val(cqp->host_ctx, 32, 0);
	set_64bit_val(cqp->host_ctx, 40, 0);
	set_64bit_val(cqp->host_ctx, 48, 0);
	set_64bit_val(cqp->host_ctx, 56, 0);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
			cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);

	/* hand the context's physical address to the device, high then low */
	p1 = RS_32_1(cqp->host_ctx_pa, 32);
	p2 = (u32)cqp->host_ctx_pa;

	if (cqp->dev->is_pf) {
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
		i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
	} else {
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
		i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
	}
	/* poll until the device raises CCQPSTATUS or we give up */
	do {
		if (cnt++ > I40IW_DONE_COUNT) {
			i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
			ret_code = I40IW_ERR_TIMEOUT;
			/*
			 * read PFPE_CQPERRORCODES register to get the minor
			 * and major error code
			 */
			if (cqp->dev->is_pf)
				err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
			else
				err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
			*min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
			*maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
			goto exit;
		}
		udelay(I40IW_SLEEP_COUNT);
		if (cqp->dev->is_pf)
			val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
		else
			val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
	} while (!val);

exit:
	/* cqp is up: SD updates can now go through it without the ccq */
	if (!ret_code)
		cqp->process_cqp_sds = i40iw_update_sds_noccq;
	return ret_code;
}
577
578 /**
579  * i40iw_sc_cqp_post_sq - post of cqp's sq
580  * @cqp: struct for cqp hw
581  */
582 void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
583 {
584         if (cqp->dev->is_pf)
585                 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
586         else
587                 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
588
589         i40iw_debug(cqp->dev,
590                     I40IW_DEBUG_WQE,
591                     "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
592                     __func__,
593                     cqp->sq_ring.head,
594                     cqp->sq_ring.tail,
595                     cqp->sq_ring.size);
596 }
597
/**
 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
 * @cqp: struct for cqp hw
 * @scratch: caller cookie stored with the wqe, returned on completion
 *
 * Returns a pointer to the next free wqe, or NULL if the ring is full
 * or the head could not be advanced.
 */
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
{
	u64 *wqe = NULL;
	u32	wqe_idx;
	enum i40iw_status_code ret_code;

	if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
		i40iw_debug(cqp->dev,
			    I40IW_DEBUG_WQE,
			    "%s: ring is full head %x tail %x size %x\n",
			    __func__,
			    cqp->sq_ring.head,
			    cqp->sq_ring.tail,
			    cqp->sq_ring.size);
		return NULL;
	}
	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
	if (ret_code)
		return NULL;
	/* the valid bit flips each time the ring wraps to index 0 */
	if (!wqe_idx)
		cqp->polarity = !cqp->polarity;

	wqe = cqp->sq_base[wqe_idx].elem;
	/* remember the caller's cookie for completion reporting */
	cqp->scratch_array[wqe_idx] = scratch;
	I40IW_CQP_INIT_WQE(wqe);

	return wqe;
}
632
633 /**
634  * i40iw_sc_cqp_destroy - destroy cqp during close
635  * @cqp: struct for cqp hw
636  */
637 static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
638 {
639         u32 cnt = 0, val = 1;
640         enum i40iw_status_code ret_code = 0;
641         u32 cqpstat_addr;
642
643         if (cqp->dev->is_pf) {
644                 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
645                 i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
646                 cqpstat_addr = I40E_PFPE_CCQPSTATUS;
647         } else {
648                 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
649                 i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
650                 cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
651         }
652         do {
653                 if (cnt++ > I40IW_DONE_COUNT) {
654                         ret_code = I40IW_ERR_TIMEOUT;
655                         break;
656                 }
657                 udelay(I40IW_SLEEP_COUNT);
658                 val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
659         } while (val);
660
661         i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
662         return ret_code;
663 }
664
/**
 * i40iw_sc_ccq_arm - enable intr for control cq
 * @ccq: ccq sc struct
 *
 * Updates the cq doorbell shadow area (bumping the arm sequence number
 * and setting ARM_NEXT) and then writes the CQARM register.
 */
static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
{
	u64 temp_val;
	u16 sw_cq_sel;
	u8 arm_next_se;
	u8 arm_seq_num;

	/* write to cq doorbell shadow area */
	/* arm next se should always be zero */
	get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);

	/* preserve the existing select and solicited-event fields */
	sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
	arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);

	/* each arm request carries a new sequence number */
	arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
	arm_seq_num++;

	temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
		   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
		   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
		   LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);

	set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);

	wmb();       /* make sure shadow area is updated before arming */

	if (ccq->dev->is_pf)
		i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
	else
		i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
}
700
/**
 * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
 * @ccq: ccq sc struct
 * @info: completion q entry to return
 *
 * Returns I40IW_ERR_QUEUE_EMPTY when no new entry is available;
 * otherwise fills @info and advances the ccq and cqp sq rings.
 */
static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
					struct i40iw_sc_cq *ccq,
					struct i40iw_ccq_cqe_info *info)
{
	u64 qp_ctx, temp, temp1;
	u64 *cqe;
	struct i40iw_sc_cqp *cqp;
	u32 wqe_idx;
	u8 polarity;
	enum i40iw_status_code ret_code = 0;

	if (ccq->cq_uk.avoid_mem_cflct)
		cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
	else
		cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);

	/* valid bit must match our polarity or the entry is stale */
	get_64bit_val(cqe, 24, &temp);
	polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
	if (polarity != ccq->cq_uk.polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	/* qword 8 carries the cqp back-pointer set at create time */
	get_64bit_val(cqe, 8, &qp_ctx);
	cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
	info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
	/* min_err_code is set unconditionally; the error branch below
	 * re-reads the same field (harmless duplication)
	 */
	info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
	if (info->error) {
		info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
		info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
	}
	wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
	/* return the caller cookie saved when the wqe was posted */
	info->scratch = cqp->scratch_array[wqe_idx];

	get_64bit_val(cqe, 16, &temp1);
	info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
	/* recover the opcode from the original sq wqe header */
	get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
	info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
	info->cqp = cqp;

	/*  move the head for cq */
	I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
	if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
		ccq->cq_uk.polarity ^= 1;

	/* update cq tail in cq shadow memory also */
	I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
	set_64bit_val(ccq->cq_uk.shadow_area,
		      0,
		      I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
	wmb(); /* write shadow area before tail */
	I40IW_RING_MOVE_TAIL(cqp->sq_ring);
	ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;

	return ret_code;
}
760
/**
 * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
 * @cqp: struct for cqp hw
 * @op_code: cqp opcode for completion
 * @compl_info: optional out param; receives the matching completion entry
 *
 * Polls the ccq until a completion with @op_code arrives, an error
 * completion is seen, or I40IW_DONE_COUNT attempts elapse.
 */
static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
					struct i40iw_sc_cqp *cqp,
					u8 op_code,
					struct i40iw_ccq_cqe_info *compl_info)
{
	struct i40iw_ccq_cqe_info info;
	struct i40iw_sc_cq *ccq;
	enum i40iw_status_code ret_code = 0;
	u32 cnt = 0;

	memset(&info, 0, sizeof(info));
	ccq = cqp->dev->ccq;
	while (1) {
		if (cnt++ > I40IW_DONE_COUNT)
			return I40IW_ERR_TIMEOUT;

		/* no entry yet: back off and retry */
		if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
			udelay(I40IW_SLEEP_COUNT);
			continue;
		}

		if (info.error) {
			ret_code = I40IW_ERR_CQP_COMPL_ERROR;
			break;
		}
		/* check if opcode is cq create */
		if (op_code != info.op_code) {
			/* mismatched completions are logged and skipped */
			i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
				    "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
				    __func__, op_code, info.op_code);
		}
		/* success, exit out of the loop */
		if (op_code == info.op_code)
			break;
	}

	if (compl_info)
		memcpy(compl_info, &info, sizeof(*compl_info));

	return ret_code;
}
808
/**
 * i40iw_sc_manage_push_page - Handle push page
 * @cqp: struct for cqp hw
 * @info: push page info (push_idx, qs_handle, free_page flag)
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds a MANAGE_PUSH_PAGES cqp wqe; info->free_page is placed in the
 * FREE_PAGE field to select free vs allocate of the push page.
 */
static enum i40iw_status_code i40iw_sc_manage_push_page(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_cqp_manage_push_page_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	/* push index must be within the device's push page table */
	if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
		return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, info->qs_handle);

	header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
848
/**
 * i40iw_sc_manage_hmc_pm_func_table - manage of function table
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @vf_index: vf index for cqp
 * @free_pm_fcn: true to free the PM function entry (placed in the FREEPMFN
 *               field of the wqe header)
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u8 vf_index,
				bool free_pm_fcn,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	/* reject vf indexes beyond what a PF can host */
	if (vf_index >= I40IW_MAX_VF_PER_PF)
		return I40IW_ERR_INVALID_VF_ID;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
		 LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
885
886 /**
887  * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
888  * @cqp: struct for cqp hw
889  * @scratch: u64 saved to be used during cqp completion
890  * @hmc_profile_type: type of profile to set
891  * @vf_num: vf number for profile
892  * @post_sq: flag for cqp db to ring
893  * @poll_registers: flag to poll register for cqp completion
894  */
895 static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
896                                 struct i40iw_sc_cqp *cqp,
897                                 u64 scratch,
898                                 u8 hmc_profile_type,
899                                 u8 vf_num, bool post_sq,
900                                 bool poll_registers)
901 {
902         u64 *wqe;
903         u64 header;
904         u32 val, tail, error;
905         enum i40iw_status_code ret_code = 0;
906
907         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
908         if (!wqe)
909                 return I40IW_ERR_RING_FULL;
910
911         set_64bit_val(wqe, 16,
912                       (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
913                                 LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));
914
915         header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
916                        LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
917
918         i40iw_insert_wqe_hdr(wqe, header);
919
920         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
921                         wqe, I40IW_CQP_WQE_SIZE * 8);
922
923         i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
924         if (error)
925                 return I40IW_ERR_CQP_COMPL_ERROR;
926
927         if (post_sq) {
928                 i40iw_sc_cqp_post_sq(cqp);
929                 if (poll_registers)
930                         ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
931                 else
932                         ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
933                                                                  I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
934                                                                  NULL);
935         }
936
937         return ret_code;
938 }
939
940 /**
941  * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
942  * @cqp: struct for cqp hw
943  */
944 static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
945 {
946         return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
947 }
948
949 /**
950  * i40iw_sc_commit_fpm_values_done - wait for cqp eqe completion for fpm commit
951  * @cqp: struct for cqp hw
952  */
953 static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
954 {
955         return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
956 }
957
/**
 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @commit_fpm_mem: Memory for fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u8 hmc_fn_id,
					struct i40iw_dma_mem *commit_fpm_mem,
					bool post_sq,
					u8 wait_type)
{
	u64 *wqe;
	u64 header;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, hmc_fn_id);
	set_64bit_val(wqe, 32, commit_fpm_mem->pa);

	header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	/* capture tail before posting so register polling can track this wqe */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);

		/* wait_type selects register polling vs control cq polling */
		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
			ret_code = i40iw_sc_commit_fpm_values_done(cqp);
	}

	return ret_code;
}
1010
1011 /**
1012  * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
1013  * @cqp: struct for cqp hw
1014  */
1015 static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
1016 {
1017         return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
1018 }
1019
/**
 * i40iw_sc_query_fpm_values - cqp wqe query fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @query_fpm_mem: memory for return fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u8 hmc_fn_id,
					struct i40iw_dma_mem *query_fpm_mem,
					bool post_sq,
					u8 wait_type)
{
	u64 *wqe;
	u64 header;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 16, hmc_fn_id);
	/* hw writes the fpm values back into this dma buffer */
	set_64bit_val(wqe, 32, query_fpm_mem->pa);

	header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	/* read the tail from CQP_TAIL register */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);

	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		/* wait_type selects register polling vs control cq polling */
		if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
			ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
		else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
			ret_code = i40iw_sc_query_fpm_values_done(cqp);
	}

	return ret_code;
}
1073
/**
 * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
 * @cqp: struct for cqp hw
 * @info: arp entry information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_add_arp_cache_entry_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 temp, header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 8, info->reach_max);

	/* pack the 6 mac bytes with mac_addr[0] highest: byte 5 in bits
	 * 7:0 up through byte 0 in bits 47:40
	 */
	temp = info->mac_addr[5] |
	       LS_64_1(info->mac_addr[4], 8) |
	       LS_64_1(info->mac_addr[3], 16) |
	       LS_64_1(info->mac_addr[2], 24) |
	       LS_64_1(info->mac_addr[1], 32) |
	       LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 16, temp);

	header = info->arp_index |
		 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
		 LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
		 LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1119
1120 /**
1121  * i40iw_sc_del_arp_cache_entry - dele arp cache entry
1122  * @cqp: struct for cqp hw
1123  * @scratch: u64 saved to be used during cqp completion
1124  * @arp_index: arp index to delete arp entry
1125  * @post_sq: flag for cqp db to ring
1126  */
1127 static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
1128                                         struct i40iw_sc_cqp *cqp,
1129                                         u64 scratch,
1130                                         u16 arp_index,
1131                                         bool post_sq)
1132 {
1133         u64 *wqe;
1134         u64 header;
1135
1136         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1137         if (!wqe)
1138                 return I40IW_ERR_RING_FULL;
1139
1140         header = arp_index |
1141                  LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
1142                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1143         i40iw_insert_wqe_hdr(wqe, header);
1144
1145         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
1146                         wqe, I40IW_CQP_WQE_SIZE * 8);
1147
1148         if (post_sq)
1149                 i40iw_sc_cqp_post_sq(cqp);
1150         return 0;
1151 }
1152
/**
 * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index of the entry to query
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u16 arp_index,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* MAT_QUERY turns the manage-arp op into a lookup of arp_index */
	header = arp_index |
		 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1187
1188 /**
1189  * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
1190  * @cqp: struct for cqp hw
1191  * @info: info for apbvt entry to add or delete
1192  * @scratch: u64 saved to be used during cqp completion
1193  * @post_sq: flag for cqp db to ring
1194  */
1195 static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
1196                                 struct i40iw_sc_cqp *cqp,
1197                                 struct i40iw_apbvt_info *info,
1198                                 u64 scratch,
1199                                 bool post_sq)
1200 {
1201         u64 *wqe;
1202         u64 header;
1203
1204         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1205         if (!wqe)
1206                 return I40IW_ERR_RING_FULL;
1207
1208         set_64bit_val(wqe, 16, info->port);
1209
1210         header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
1211                  LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
1212                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1213
1214         i40iw_insert_wqe_hdr(wqe, header);
1215
1216         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
1217                         wqe, I40IW_CQP_WQE_SIZE * 8);
1218
1219         if (post_sq)
1220                 i40iw_sc_cqp_post_sq(cqp);
1221         return 0;
1222 }
1223
/**
 * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started. For passive connections, when
 * listener is created, it will call with entry type of I40IW_QHASH_TYPE_TCP_SYN with local
 * ip address and tcp port. When SYN is received (passive connections) or
 * sent (active connections), this routine is called with entry type of
 * I40IW_QHASH_TYPE_TCP_ESTABLISHED and quad is passed in info.
 *
 * When iwarp connection is done and its state moves to RTS, the quad hash entry in
 * the hardware will point to iwarp's qp number and requires no calls from the driver.
 */
static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
					struct i40iw_sc_cqp *cqp,
					struct i40iw_qhash_table_info *info,
					u64 scratch,
					bool post_sq)
{
	u64 *wqe;
	u64 qw1 = 0;
	u64 qw2 = 0;
	u64 temp;
	struct i40iw_sc_vsi *vsi = info->vsi;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	/* pack the 6 mac bytes with mac_addr[0] in the highest bits */
	temp = info->mac_addr[5] |
		LS_64_1(info->mac_addr[4], 8) |
		LS_64_1(info->mac_addr[3], 16) |
		LS_64_1(info->mac_addr[2], 24) |
		LS_64_1(info->mac_addr[1], 32) |
		LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 0, temp);

	qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
	      LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
	/* ipv4 uses only the ADDR3 word; ipv6 spreads dest_ip[0..3]
	 * across two quadwords (offsets 56 and 48)
	 */
	if (info->ipv4_valid) {
		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
	} else {
		set_64bit_val(wqe,
			      56,
			      LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
			      LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));

		set_64bit_val(wqe,
			      48,
			      LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
			      LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
	}
	/* qs_handle comes from the vsi's qos entry for this user priority */
	qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
	if (info->vlan_valid)
		qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
	set_64bit_val(wqe, 16, qw2);
	/* established entries carry the full quad: add source port/ip too */
	if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
		qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
		if (!info->ipv4_valid) {
			set_64bit_val(wqe,
				      40,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
				      LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
				      LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
		} else {
			set_64bit_val(wqe,
				      32,
				      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
		}
	}

	set_64bit_val(wqe, 8, qw1);
	temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
	       LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
	       LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
	       LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
	       LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
	       LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);

	i40iw_insert_wqe_hdr(wqe, temp);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1321
1322 /**
1323  * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
1324  * @cqp: struct for cqp hw
1325  * @scratch: u64 saved to be used during cqp completion
1326  * @post_sq: flag for cqp db to ring
1327  */
1328 static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
1329                                         struct i40iw_sc_cqp *cqp,
1330                                         u64 scratch,
1331                                         bool post_sq)
1332 {
1333         u64 *wqe;
1334         u64 header;
1335
1336         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1337         if (!wqe)
1338                 return I40IW_ERR_RING_FULL;
1339         header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
1340                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1341
1342         i40iw_insert_wqe_hdr(wqe, header);
1343         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
1344                         wqe, I40IW_CQP_WQE_SIZE * 8);
1345         if (post_sq)
1346                 i40iw_sc_cqp_post_sq(cqp);
1347         return 0;
1348 }
1349
/**
 * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
 * @cqp: struct for cqp hw
 * @info: mac addr info (entry index and mac address)
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
				struct i40iw_sc_cqp *cqp,
				struct i40iw_local_mac_ipaddr_entry_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 temp, header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* pack the 6 mac bytes with mac_addr[0] in the highest bits */
	temp = info->mac_addr[5] |
		LS_64_1(info->mac_addr[4], 8) |
		LS_64_1(info->mac_addr[3], 16) |
		LS_64_1(info->mac_addr[2], 24) |
		LS_64_1(info->mac_addr[1], 32) |
		LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, 32, temp);

	header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1391
/**
 * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac address entry delete
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
				struct i40iw_sc_cqp *cqp,
				u64 scratch,
				u8 entry_idx,
				u8 ignore_ref_count,
				bool post_sq)
{
	u64 *wqe;
	u64 header;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* FREEENTRY marks this manage-table op as a delete */
	header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
		 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1428
1429 /**
1430  * i40iw_sc_cqp_nop - send a nop wqe
1431  * @cqp: struct for cqp hw
1432  * @scratch: u64 saved to be used during cqp completion
1433  * @post_sq: flag for cqp db to ring
1434  */
1435 static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
1436                                                u64 scratch,
1437                                                bool post_sq)
1438 {
1439         u64 *wqe;
1440         u64 header;
1441
1442         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1443         if (!wqe)
1444                 return I40IW_ERR_RING_FULL;
1445         header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
1446                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1447         i40iw_insert_wqe_hdr(wqe, header);
1448         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
1449                         wqe, I40IW_CQP_WQE_SIZE * 8);
1450
1451         if (post_sq)
1452                 i40iw_sc_cqp_post_sq(cqp);
1453         return 0;
1454 }
1455
1456 /**
1457  * i40iw_sc_ceq_init - initialize ceq
1458  * @ceq: ceq sc structure
1459  * @info: ceq initialization info
1460  */
1461 static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
1462                                                 struct i40iw_ceq_init_info *info)
1463 {
1464         u32 pble_obj_cnt;
1465
1466         if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
1467             (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
1468                 return I40IW_ERR_INVALID_SIZE;
1469
1470         if (info->ceq_id >= I40IW_MAX_CEQID)
1471                 return I40IW_ERR_INVALID_CEQ_ID;
1472
1473         pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1474
1475         if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1476                 return I40IW_ERR_INVALID_PBLE_INDEX;
1477
1478         ceq->size = sizeof(*ceq);
1479         ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
1480         ceq->ceq_id = info->ceq_id;
1481         ceq->dev = info->dev;
1482         ceq->elem_cnt = info->elem_cnt;
1483         ceq->ceq_elem_pa = info->ceqe_pa;
1484         ceq->virtual_map = info->virtual_map;
1485
1486         ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
1487         ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
1488         ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
1489
1490         ceq->tph_en = info->tph_en;
1491         ceq->tph_val = info->tph_val;
1492         ceq->polarity = 1;
1493         I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
1494         ceq->dev->ceq[info->ceq_id] = ceq;
1495
1496         return 0;
1497 }
1498
/**
 * i40iw_sc_ceq_create - create ceq wqe
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
						  u64 scratch,
						  bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = ceq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, ceq->elem_cnt);
	/* a contiguous ceq passes its dma address; a virtually mapped
	 * one passes the first pble index instead
	 */
	set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
	set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = ceq->ceq_id |
		 LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
		 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
		 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1538
1539 /**
1540  * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
1541  * @ceq: ceq sc structure
1542  */
1543 static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
1544 {
1545         struct i40iw_sc_cqp *cqp;
1546
1547         cqp = ceq->dev->cqp;
1548         return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
1549 }
1550
1551 /**
1552  * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
1553  * @ceq: ceq sc structure
1554  */
1555 static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
1556 {
1557         struct i40iw_sc_cqp *cqp;
1558
1559         cqp = ceq->dev->cqp;
1560         cqp->process_cqp_sds = i40iw_update_sds_noccq;
1561         return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
1562 }
1563
1564 /**
1565  * i40iw_sc_cceq_create - create cceq
1566  * @ceq: ceq sc structure
1567  * @scratch: u64 saved to be used during cqp completion
1568  */
1569 static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
1570 {
1571         enum i40iw_status_code ret_code;
1572
1573         ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
1574         if (!ret_code)
1575                 ret_code = i40iw_sc_cceq_create_done(ceq);
1576         return ret_code;
1577 }
1578
/**
 * i40iw_sc_ceq_destroy - destroy ceq
 * @ceq: ceq sc structure
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
						   u64 scratch,
						   bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = ceq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, ceq->elem_cnt);
	set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
	/* mirrors the create wqe layout with the DESTROY_CEQ opcode */
	header = ceq->ceq_id |
		 LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
		 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
		 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1613
/**
 * i40iw_sc_process_ceq - process ceq
 * @dev: sc device struct
 * @ceq: ceq sc structure
 *
 * Returns the cq that completed at the current tail of the CEQ ring,
 * or NULL when no new event is present.  On a valid event the ring
 * tail is advanced and the event is acked to hardware.
 */
static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
{
	u64 temp;
	u64 *ceqe;
	struct i40iw_sc_cq *cq = NULL;
	u8 polarity;

	ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
	get_64bit_val(ceqe, 0, &temp);
	polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
	/* valid bit out of phase with the ring polarity => queue empty */
	if (polarity != ceq->polarity)
		return cq;

	/* cq context is stored shifted right by one (see RS_64_1 at cq
	 * create time); LS_64_1 restores the original pointer value
	 */
	cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);

	I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
	/* expected polarity flips each time the ring wraps to index 0 */
	if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
		ceq->polarity ^= 1;

	/* ack the completion event so hardware can reuse the cq slot */
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
	else
		i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);

	return cq;
}
1645
1646 /**
1647  * i40iw_sc_aeq_init - initialize aeq
1648  * @aeq: aeq structure ptr
1649  * @info: aeq initialization info
1650  */
1651 static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
1652                                                 struct i40iw_aeq_init_info *info)
1653 {
1654         u32 pble_obj_cnt;
1655
1656         if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
1657             (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
1658                 return I40IW_ERR_INVALID_SIZE;
1659         pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1660
1661         if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1662                 return I40IW_ERR_INVALID_PBLE_INDEX;
1663
1664         aeq->size = sizeof(*aeq);
1665         aeq->polarity = 1;
1666         aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
1667         aeq->dev = info->dev;
1668         aeq->elem_cnt = info->elem_cnt;
1669
1670         aeq->aeq_elem_pa = info->aeq_elem_pa;
1671         I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
1672         info->dev->aeq = aeq;
1673
1674         aeq->virtual_map = info->virtual_map;
1675         aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
1676         aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
1677         aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
1678         info->dev->aeq = aeq;
1679         return 0;
1680 }
1681
/**
 * i40iw_sc_aeq_create - create aeq
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds a CREATE_AEQ WQE on the CQP send queue.  For a virtually
 * mapped AEQ the first PBL index is supplied instead of the element
 * physical address, and vice versa.
 */
static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
						  u64 scratch,
						  bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = aeq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 16, aeq->elem_cnt);
	/* physical address only for physically mapped queues */
	set_64bit_val(wqe, 32,
		      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
	/* PBL index only for virtually mapped queues */
	set_64bit_val(wqe, 48,
		      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));

	header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header with valid bit written after the payload words */
	i40iw_insert_wqe_hdr(wqe, header);
	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1718
/**
 * i40iw_sc_aeq_destroy - destroy aeq during close
 * @aeq: aeq structure ptr
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds a DESTROY_AEQ WQE on the CQP send queue; the doorbell is
 * only rung when @post_sq is set.
 */
static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
						   u64 scratch,
						   bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	cqp = aeq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	/* geometry needed by HW to tear down the queue */
	set_64bit_val(wqe, 16, aeq->elem_cnt);
	set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
	header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
		 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
	/* header with valid bit written after the payload words */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
1751
/**
 * i40iw_sc_get_next_aeqe - get next aeq entry
 * @aeq: aeq structure ptr
 * @info: aeqe info to be returned
 *
 * Decodes the asynchronous event at the current AEQ ring tail into
 * @info and advances the ring.  Returns I40IW_ERR_QUEUE_EMPTY when no
 * new entry is present (valid bit does not match ring polarity).
 */
static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
						     struct i40iw_aeqe_info *info)
{
	u64 temp, compl_ctx;
	u64 *aeqe;
	u16 wqe_idx;
	u8 ae_src;
	u8 polarity;

	aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
	/* qword 0 carries the completion context, qword 1 the event fields */
	get_64bit_val(aeqe, 0, &compl_ctx);
	get_64bit_val(aeqe, 8, &temp);
	polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);

	if (aeq->polarity != polarity)
		return I40IW_ERR_QUEUE_EMPTY;

	i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);

	/* unpack the event descriptor fields */
	ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
	wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
	info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
	info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
	info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
	info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
	info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
	info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);

	/* for these event codes the ae source field is overridden: the
	 * compl_ctx is interpreted by ae code, not by the reported source
	 */
	switch (info->ae_id) {
	case I40IW_AE_PRIV_OPERATION_DENIED:
	case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
	case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
	case I40IW_AE_BAD_CLOSE:
	case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE:
	case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO:
	case I40IW_AE_STAG_ZERO_INVALID:
	case I40IW_AE_IB_RREQ_AND_Q1_FULL:
	case I40IW_AE_WQE_UNEXPECTED_OPCODE:
	case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
	case I40IW_AE_DDP_UBE_INVALID_MO:
	case I40IW_AE_DDP_UBE_INVALID_QN:
	case I40IW_AE_DDP_NO_L_BIT:
	case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
	case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
	case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
	case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
	case I40IW_AE_INVALID_ARP_ENTRY:
	case I40IW_AE_INVALID_TCP_OPTION_RCVD:
	case I40IW_AE_STALE_ARP_ENTRY:
	case I40IW_AE_LLP_CLOSE_COMPLETE:
	case I40IW_AE_LLP_CONNECTION_RESET:
	case I40IW_AE_LLP_FIN_RECEIVED:
	case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
	case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
	case I40IW_AE_LLP_SYN_RECEIVED:
	case I40IW_AE_LLP_TERMINATE_RECEIVED:
	case I40IW_AE_LLP_TOO_MANY_RETRIES:
	case I40IW_AE_LLP_DOUBT_REACHABILITY:
	case I40IW_AE_RESET_SENT:
	case I40IW_AE_TERMINATE_SENT:
	case I40IW_AE_RESET_NOT_SENT:
	case I40IW_AE_LCE_QP_CATASTROPHIC:
	case I40IW_AE_QP_SUSPEND_COMPLETE:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		ae_src = I40IW_AE_SOURCE_RSVD;
		break;
	case I40IW_AE_LCE_CQ_CATASTROPHIC:
		info->cq = true;
		/* cq contexts are stored shifted right by one; undo it */
		info->compl_ctx = LS_64_1(compl_ctx, 1);
		ae_src = I40IW_AE_SOURCE_RSVD;
		break;
	}

	/* otherwise classify the event by its reported source */
	switch (ae_src) {
	case I40IW_AE_SOURCE_RQ:
	case I40IW_AE_SOURCE_RQ_0011:
		info->qp = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_CQ:
	case I40IW_AE_SOURCE_CQ_0110:
	case I40IW_AE_SOURCE_CQ_1010:
	case I40IW_AE_SOURCE_CQ_1110:
		info->cq = true;
		info->compl_ctx = LS_64_1(compl_ctx, 1);
		break;
	case I40IW_AE_SOURCE_SQ:
	case I40IW_AE_SOURCE_SQ_0111:
		info->qp = true;
		info->sq = true;
		info->wqe_idx = wqe_idx;
		info->compl_ctx = compl_ctx;
		break;
	case I40IW_AE_SOURCE_IN_RR_WR:
	case I40IW_AE_SOURCE_IN_RR_WR_1011:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->in_rdrsp_wr = true;
		break;
	case I40IW_AE_SOURCE_OUT_RR:
	case I40IW_AE_SOURCE_OUT_RR_1111:
		info->qp = true;
		info->compl_ctx = compl_ctx;
		info->out_rdrsp = true;
		break;
	case I40IW_AE_SOURCE_RSVD:
		/* fallthrough */
	default:
		break;
	}
	/* consume the entry; polarity flips when the ring wraps */
	I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
	if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
		aeq->polarity ^= 1;
	return 0;
}
1874
1875 /**
1876  * i40iw_sc_repost_aeq_entries - repost completed aeq entries
1877  * @dev: sc device struct
1878  * @count: allocate count
1879  */
1880 static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
1881                                                           u32 count)
1882 {
1883         if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
1884                 return I40IW_ERR_INVALID_SIZE;
1885
1886         if (dev->is_pf)
1887                 i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
1888         else
1889                 i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);
1890
1891         return 0;
1892 }
1893
/**
 * i40iw_sc_aeq_create_done - poll for aeq create to complete
 * @aeq: aeq structure ptr
 */
static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
{
	struct i40iw_sc_cqp *cqp;

	cqp = aeq->dev->cqp;
	return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
}
1905
1906 /**
1907  * i40iw_sc_aeq_destroy_done - destroy of aeq during close
1908  * @aeq: aeq structure ptr
1909  */
1910 static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
1911 {
1912         struct i40iw_sc_cqp *cqp;
1913
1914         cqp = aeq->dev->cqp;
1915         return  i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
1916 }
1917
1918 /**
1919  * i40iw_sc_ccq_init - initialize control cq
1920  * @cq: sc's cq ctruct
1921  * @info: info for control cq initialization
1922  */
1923 static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
1924                                                 struct i40iw_ccq_init_info *info)
1925 {
1926         u32 pble_obj_cnt;
1927
1928         if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
1929                 return I40IW_ERR_INVALID_SIZE;
1930
1931         if (info->ceq_id > I40IW_MAX_CEQID)
1932                 return I40IW_ERR_INVALID_CEQ_ID;
1933
1934         pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1935
1936         if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1937                 return I40IW_ERR_INVALID_PBLE_INDEX;
1938
1939         cq->cq_pa = info->cq_pa;
1940         cq->cq_uk.cq_base = info->cq_base;
1941         cq->shadow_area_pa = info->shadow_area_pa;
1942         cq->cq_uk.shadow_area = info->shadow_area;
1943         cq->shadow_read_threshold = info->shadow_read_threshold;
1944         cq->dev = info->dev;
1945         cq->ceq_id = info->ceq_id;
1946         cq->cq_uk.cq_size = info->num_elem;
1947         cq->cq_type = I40IW_CQ_TYPE_CQP;
1948         cq->ceqe_mask = info->ceqe_mask;
1949         I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
1950
1951         cq->cq_uk.cq_id = 0;    /* control cq is id 0 always */
1952         cq->ceq_id_valid = info->ceq_id_valid;
1953         cq->tph_en = info->tph_en;
1954         cq->tph_val = info->tph_val;
1955         cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
1956
1957         cq->pbl_list = info->pbl_list;
1958         cq->virtual_map = info->virtual_map;
1959         cq->pbl_chunk_size = info->pbl_chunk_size;
1960         cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
1961         cq->cq_uk.polarity = true;
1962
1963         /* following are only for iw cqs so initialize them to zero */
1964         cq->cq_uk.cqe_alloc_reg = NULL;
1965         info->dev->ccq = cq;
1966         return 0;
1967 }
1968
1969 /**
1970  * i40iw_sc_ccq_create_done - poll cqp for ccq create
1971  * @ccq: ccq sc struct
1972  */
1973 static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
1974 {
1975         struct i40iw_sc_cqp *cqp;
1976
1977         cqp = ccq->dev->cqp;
1978         return  i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
1979 }
1980
/**
 * i40iw_sc_ccq_create - create control cq
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @check_overflow: overlow flag for ccq
 * @post_sq: flag for cqp db to ring
 *
 * Builds a CREATE_CQ WQE for the control cq.  When @post_sq is set,
 * the request is posted and its completion is waited for; on success
 * the CQP's SD update handler is switched to the CCQ path.
 */
static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
						  u64 scratch,
						  bool check_overflow,
						  bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	enum i40iw_status_code ret_code;

	cqp = ccq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
	/* cq context is posted shifted right by one bit */
	set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	/* physical address only for physically mapped queues */
	set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
	set_64bit_val(wqe, 40, ccq->shadow_area_pa);
	/* PBL index only for virtually mapped queues */
	set_64bit_val(wqe, 48,
		      (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56,
		      LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = ccq->cq_uk.cq_id |
		 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header with valid bit written after the payload words */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		ret_code = i40iw_sc_ccq_create_done(ccq);
		if (ret_code)
			return ret_code;
	}
	/* with a live ccq, SD updates can go through the ccq path */
	cqp->process_cqp_sds = i40iw_cqp_sds_cmd;

	return 0;
}
2040
/**
 * i40iw_sc_ccq_destroy - destroy ccq during close
 * @ccq: ccq sc struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds a DESTROY_CQ WQE for the control cq.  Since the ccq itself
 * is going away, completion is detected by polling the CQP registers
 * (tail movement) rather than via a ccq completion, and the SD update
 * handler is switched to the non-CCQ path.
 */
static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
						   u64 scratch,
						   bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	enum i40iw_status_code ret_code = 0;
	u32 tail, val, error;

	cqp = ccq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
	/* cq context is posted shifted right by one bit */
	set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
	set_64bit_val(wqe, 40, ccq->shadow_area_pa);

	header = ccq->cq_uk.cq_id |
		 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header with valid bit written after the payload words */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	/* snapshot tail before posting so register polling can detect
	 * consumption of this WQE
	 */
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
	}

	cqp->process_cqp_sds = i40iw_update_sds_noccq;

	return ret_code;
}
2092
2093 /**
2094  * i40iw_sc_cq_init - initialize completion q
2095  * @cq: cq struct
2096  * @info: cq initialization info
2097  */
2098 static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
2099                                                struct i40iw_cq_init_info *info)
2100 {
2101         u32 __iomem *cqe_alloc_reg = NULL;
2102         enum i40iw_status_code ret_code;
2103         u32 pble_obj_cnt;
2104         u32 arm_offset;
2105
2106         pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2107
2108         if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
2109                 return I40IW_ERR_INVALID_PBLE_INDEX;
2110
2111         cq->cq_pa = info->cq_base_pa;
2112         cq->dev = info->dev;
2113         cq->ceq_id = info->ceq_id;
2114         arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
2115         if (i40iw_get_hw_addr(cq->dev))
2116                 cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
2117                                               arm_offset);
2118         info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
2119         ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
2120         if (ret_code)
2121                 return ret_code;
2122         cq->virtual_map = info->virtual_map;
2123         cq->pbl_chunk_size = info->pbl_chunk_size;
2124         cq->ceqe_mask = info->ceqe_mask;
2125         cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
2126
2127         cq->shadow_area_pa = info->shadow_area_pa;
2128         cq->shadow_read_threshold = info->shadow_read_threshold;
2129
2130         cq->ceq_id_valid = info->ceq_id_valid;
2131         cq->tph_en = info->tph_en;
2132         cq->tph_val = info->tph_val;
2133
2134         cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2135
2136         return 0;
2137 }
2138
/**
 * i40iw_sc_cq_create - create completion q
 * @cq: cq struct
 * @scratch: u64 saved to be used during cqp completion
 * @check_overflow: flag for overflow check
 * @post_sq: flag for cqp db to ring
 *
 * Builds a CREATE_CQ WQE on the CQP send queue after validating the
 * cq and ceq ids against their maxima.
 */
static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
						 u64 scratch,
						 bool check_overflow,
						 bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;

	if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
		return I40IW_ERR_INVALID_CQ_ID;

	if (cq->ceq_id > I40IW_MAX_CEQID)
		return I40IW_ERR_INVALID_CEQ_ID;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
	/* cq context is posted shifted right by one bit */
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe,
		      16,
		      LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));

	/* physical address only for physically mapped queues */
	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));

	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	/* PBL index only for virtually mapped queues */
	set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = cq->cq_uk.cq_id |
		 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header with valid bit written after the payload words */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2199
/**
 * i40iw_sc_cq_destroy - destroy completion q
 * @cq: cq struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * Builds a DESTROY_CQ WQE on the CQP send queue; the doorbell is only
 * rung when @post_sq is set.
 */
static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
						  u64 scratch,
						  bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
	/* cq context is posted shifted right by one bit */
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	/* PBL index only for virtually mapped queues */
	set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));

	header = cq->cq_uk.cq_id |
		 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header with valid bit written after the payload words */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2243
/**
 * i40iw_sc_cq_modify - modify a Completion Queue
 * @cq: cq struct
 * @info: modification info struct
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag to post to sq
 *
 * Builds a MODIFY_CQ WQE.  Each modifiable attribute (size, ceq id,
 * virtual mapping, overflow checking) is taken from @info when the
 * corresponding change flag is set, otherwise carried over from the
 * current cq state; the resolved values are written back into @cq.
 */
static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
						 struct i40iw_modify_cq_info *info,
						 u64 scratch,
						 bool post_sq)
{
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;
	u64 header;
	u32 cq_size, ceq_id, first_pm_pbl_idx;
	u8 pbl_chunk_size;
	bool virtual_map, ceq_id_valid, check_overflow;
	u32 pble_obj_cnt;

	if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
		return I40IW_ERR_INVALID_CEQ_ID;

	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	/* PBL index only meaningful on a virtually mapped resize */
	if (info->cq_resize && info->virtual_map &&
	    (info->first_pm_pbl_idx >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cqp = cq->dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	cq->pbl_list = info->pbl_list;
	cq->cq_pa = info->cq_pa;
	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;

	/* resolve each attribute: new value when changed, else current */
	cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
	if (info->ceq_change) {
		ceq_id_valid = true;
		ceq_id = info->ceq_id;
	} else {
		ceq_id_valid = cq->ceq_id_valid;
		ceq_id = ceq_id_valid ? cq->ceq_id : 0;
	}
	virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
	first_pm_pbl_idx = (info->cq_resize ?
			    (info->virtual_map ? info->first_pm_pbl_idx : 0) :
			    (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
	pbl_chunk_size = (info->cq_resize ?
			  (info->virtual_map ? info->pbl_chunk_size : 0) :
			  (cq->virtual_map ? cq->pbl_chunk_size : 0));
	check_overflow = info->check_overflow_change ? info->check_overflow :
			 cq->check_overflow;
	/* commit the resolved values into the cq state */
	cq->cq_uk.cq_size = cq_size;
	cq->ceq_id_valid = ceq_id_valid;
	cq->ceq_id = ceq_id;
	cq->virtual_map = virtual_map;
	cq->first_pm_pbl_idx = first_pm_pbl_idx;
	cq->pbl_chunk_size = pbl_chunk_size;
	cq->check_overflow = check_overflow;

	set_64bit_val(wqe, 0, cq_size);
	/* cq context is posted shifted right by one bit */
	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
	set_64bit_val(wqe, 16,
		      LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
	set_64bit_val(wqe, 40, cq->shadow_area_pa);
	set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
	set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));

	header = cq->cq_uk.cq_id |
		 LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
		 LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
		 LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
		 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
		 LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
		 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
		 LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
		 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
		 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	/* header with valid bit written after the payload words */
	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}
2338
2339 /**
2340  * i40iw_sc_qp_init - initialize qp
2341  * @qp: sc qp
2342  * @info: initialization qp info
2343  */
2344 static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
2345                                                struct i40iw_qp_init_info *info)
2346 {
2347         u32 __iomem *wqe_alloc_reg = NULL;
2348         enum i40iw_status_code ret_code;
2349         u32 pble_obj_cnt;
2350         u8 wqe_size;
2351         u32 offset;
2352
2353         qp->dev = info->pd->dev;
2354         qp->vsi = info->vsi;
2355         qp->sq_pa = info->sq_pa;
2356         qp->rq_pa = info->rq_pa;
2357         qp->hw_host_ctx_pa = info->host_ctx_pa;
2358         qp->q2_pa = info->q2_pa;
2359         qp->shadow_area_pa = info->shadow_area_pa;
2360
2361         qp->q2_buf = info->q2;
2362         qp->pd = info->pd;
2363         qp->hw_host_ctx = info->host_ctx;
2364         offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
2365         if (i40iw_get_hw_addr(qp->pd->dev))
2366                 wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
2367                                               offset);
2368
2369         info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
2370         info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
2371         ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
2372         if (ret_code)
2373                 return ret_code;
2374         qp->virtual_map = info->virtual_map;
2375
2376         pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2377
2378         if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
2379             (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
2380                 return I40IW_ERR_INVALID_PBLE_INDEX;
2381
2382         qp->llp_stream_handle = (void *)(-1);
2383         qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;
2384
2385         qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
2386                                                     false);
2387         i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
2388                     __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
2389
2390         switch (qp->pd->abi_ver) {
2391         case 4:
2392                 ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
2393                                                        &wqe_size);
2394                 if (ret_code)
2395                         return ret_code;
2396                 break;
2397         case 5: /* fallthrough until next ABI version */
2398         default:
2399                 if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
2400                         return I40IW_ERR_INVALID_FRAG_COUNT;
2401                 wqe_size = I40IW_MAX_WQE_SIZE_RQ;
2402                 break;
2403         }
2404         qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
2405                                 (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
2406         i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
2407                     "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
2408                     __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
2409         qp->sq_tph_val = info->sq_tph_val;
2410         qp->rq_tph_val = info->rq_tph_val;
2411         qp->sq_tph_en = info->sq_tph_en;
2412         qp->rq_tph_en = info->rq_tph_en;
2413         qp->rcv_tph_en = info->rcv_tph_en;
2414         qp->xmit_tph_en = info->xmit_tph_en;
2415         qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
2416
2417         return 0;
2418 }
2419
2420 /**
2421  * i40iw_sc_qp_create - create qp
2422  * @qp: sc qp
2423  * @info: qp create info
2424  * @scratch: u64 saved to be used during cqp completion
2425  * @post_sq: flag for cqp db to ring
2426  */
2427 static enum i40iw_status_code i40iw_sc_qp_create(
2428                                 struct i40iw_sc_qp *qp,
2429                                 struct i40iw_create_qp_info *info,
2430                                 u64 scratch,
2431                                 bool post_sq)
2432 {
2433         struct i40iw_sc_cqp *cqp;
2434         u64 *wqe;
2435         u64 header;
2436
2437         if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
2438             (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
2439                 return I40IW_ERR_INVALID_QP_ID;
2440
2441         cqp = qp->pd->dev->cqp;
2442         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2443         if (!wqe)
2444                 return I40IW_ERR_RING_FULL;
2445
2446         set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2447
2448         set_64bit_val(wqe, 40, qp->shadow_area_pa);
2449
2450         header = qp->qp_uk.qp_id |
2451                  LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
2452                  LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
2453                  LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
2454                  LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2455                  LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
2456                  LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2457                  LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
2458                  LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
2459                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2460
2461         i40iw_insert_wqe_hdr(wqe, header);
2462         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
2463                         wqe, I40IW_CQP_WQE_SIZE * 8);
2464
2465         if (post_sq)
2466                 i40iw_sc_cqp_post_sq(cqp);
2467         return 0;
2468 }
2469
2470 /**
2471  * i40iw_sc_qp_modify - modify qp cqp wqe
2472  * @qp: sc qp
2473  * @info: modify qp info
2474  * @scratch: u64 saved to be used during cqp completion
2475  * @post_sq: flag for cqp db to ring
2476  */
2477 static enum i40iw_status_code i40iw_sc_qp_modify(
2478                                 struct i40iw_sc_qp *qp,
2479                                 struct i40iw_modify_qp_info *info,
2480                                 u64 scratch,
2481                                 bool post_sq)
2482 {
2483         u64 *wqe;
2484         struct i40iw_sc_cqp *cqp;
2485         u64 header;
2486         u8 term_actions = 0;
2487         u8 term_len = 0;
2488
2489         cqp = qp->pd->dev->cqp;
2490         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2491         if (!wqe)
2492                 return I40IW_ERR_RING_FULL;
2493         if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
2494                 if (info->dont_send_fin)
2495                         term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
2496                 if (info->dont_send_term)
2497                         term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
2498                 if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
2499                     (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
2500                         term_len = info->termlen;
2501         }
2502
2503         set_64bit_val(wqe,
2504                       8,
2505                       LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
2506
2507         set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2508         set_64bit_val(wqe, 40, qp->shadow_area_pa);
2509
2510         header = qp->qp_uk.qp_id |
2511                  LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
2512                  LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
2513                  LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
2514                  LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
2515                  LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
2516                  LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2517                  LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
2518                  LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2519                  LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2520                  LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
2521                  LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
2522                  LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
2523                  LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
2524                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2525
2526         i40iw_insert_wqe_hdr(wqe, header);
2527
2528         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
2529                         wqe, I40IW_CQP_WQE_SIZE * 8);
2530
2531         if (post_sq)
2532                 i40iw_sc_cqp_post_sq(cqp);
2533         return 0;
2534 }
2535
2536 /**
2537  * i40iw_sc_qp_destroy - cqp destroy qp
2538  * @qp: sc qp
2539  * @scratch: u64 saved to be used during cqp completion
2540  * @remove_hash_idx: flag if to remove hash idx
2541  * @ignore_mw_bnd: memory window bind flag
2542  * @post_sq: flag for cqp db to ring
2543  */
2544 static enum i40iw_status_code i40iw_sc_qp_destroy(
2545                                         struct i40iw_sc_qp *qp,
2546                                         u64 scratch,
2547                                         bool remove_hash_idx,
2548                                         bool ignore_mw_bnd,
2549                                         bool post_sq)
2550 {
2551         u64 *wqe;
2552         struct i40iw_sc_cqp *cqp;
2553         u64 header;
2554
2555         i40iw_qp_rem_qos(qp);
2556         cqp = qp->pd->dev->cqp;
2557         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2558         if (!wqe)
2559                 return I40IW_ERR_RING_FULL;
2560         set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2561         set_64bit_val(wqe, 40, qp->shadow_area_pa);
2562
2563         header = qp->qp_uk.qp_id |
2564                  LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
2565                  LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2566                  LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
2567                  LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2568                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2569
2570         i40iw_insert_wqe_hdr(wqe, header);
2571         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
2572                         wqe, I40IW_CQP_WQE_SIZE * 8);
2573
2574         if (post_sq)
2575                 i40iw_sc_cqp_post_sq(cqp);
2576         return 0;
2577 }
2578
2579 /**
2580  * i40iw_sc_qp_flush_wqes - flush qp's wqe
2581  * @qp: sc qp
2582  * @info: dlush information
2583  * @scratch: u64 saved to be used during cqp completion
2584  * @post_sq: flag for cqp db to ring
2585  */
2586 static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
2587                                 struct i40iw_sc_qp *qp,
2588                                 struct i40iw_qp_flush_info *info,
2589                                 u64 scratch,
2590                                 bool post_sq)
2591 {
2592         u64 temp = 0;
2593         u64 *wqe;
2594         struct i40iw_sc_cqp *cqp;
2595         u64 header;
2596         bool flush_sq = false, flush_rq = false;
2597
2598         if (info->rq && !qp->flush_rq)
2599                 flush_rq = true;
2600
2601         if (info->sq && !qp->flush_sq)
2602                 flush_sq = true;
2603
2604         qp->flush_sq |= flush_sq;
2605         qp->flush_rq |= flush_rq;
2606         if (!flush_sq && !flush_rq) {
2607                 if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
2608                         return 0;
2609         }
2610
2611         cqp = qp->pd->dev->cqp;
2612         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2613         if (!wqe)
2614                 return I40IW_ERR_RING_FULL;
2615         if (info->userflushcode) {
2616                 if (flush_rq) {
2617                         temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
2618                                 LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
2619                 }
2620                 if (flush_sq) {
2621                         temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
2622                                 LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
2623                 }
2624         }
2625         set_64bit_val(wqe, 16, temp);
2626
2627         temp = (info->generate_ae) ?
2628                 info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;
2629
2630         set_64bit_val(wqe, 8, temp);
2631
2632         header = qp->qp_uk.qp_id |
2633                  LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
2634                  LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
2635                  LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
2636                  LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
2637                  LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
2638                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2639
2640         i40iw_insert_wqe_hdr(wqe, header);
2641
2642         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
2643                         wqe, I40IW_CQP_WQE_SIZE * 8);
2644
2645         if (post_sq)
2646                 i40iw_sc_cqp_post_sq(cqp);
2647         return 0;
2648 }
2649
2650 /**
2651  * i40iw_sc_qp_upload_context - upload qp's context
2652  * @dev: sc device struct
2653  * @info: upload context info ptr for return
2654  * @scratch: u64 saved to be used during cqp completion
2655  * @post_sq: flag for cqp db to ring
2656  */
2657 static enum i40iw_status_code i40iw_sc_qp_upload_context(
2658                                         struct i40iw_sc_dev *dev,
2659                                         struct i40iw_upload_context_info *info,
2660                                         u64 scratch,
2661                                         bool post_sq)
2662 {
2663         u64 *wqe;
2664         struct i40iw_sc_cqp *cqp;
2665         u64 header;
2666
2667         cqp = dev->cqp;
2668         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2669         if (!wqe)
2670                 return I40IW_ERR_RING_FULL;
2671         set_64bit_val(wqe, 16, info->buf_pa);
2672
2673         header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
2674                  LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
2675                  LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
2676                  LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
2677                  LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
2678                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2679
2680         i40iw_insert_wqe_hdr(wqe, header);
2681
2682         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
2683                         wqe, I40IW_CQP_WQE_SIZE * 8);
2684
2685         if (post_sq)
2686                 i40iw_sc_cqp_post_sq(cqp);
2687         return 0;
2688 }
2689
2690 /**
2691  * i40iw_sc_qp_setctx - set qp's context
2692  * @qp: sc qp
2693  * @qp_ctx: context ptr
2694  * @info: ctx info
2695  */
2696 static enum i40iw_status_code i40iw_sc_qp_setctx(
2697                                 struct i40iw_sc_qp *qp,
2698                                 u64 *qp_ctx,
2699                                 struct i40iw_qp_host_ctx_info *info)
2700 {
2701         struct i40iwarp_offload_info *iw;
2702         struct i40iw_tcp_offload_info *tcp;
2703         struct i40iw_sc_vsi *vsi;
2704         struct i40iw_sc_dev *dev;
2705         u64 qw0, qw3, qw7 = 0;
2706
2707         iw = info->iwarp_info;
2708         tcp = info->tcp_info;
2709         vsi = qp->vsi;
2710         dev = qp->dev;
2711         if (info->add_to_qoslist) {
2712                 qp->user_pri = info->user_pri;
2713                 i40iw_qp_add_qos(qp);
2714                 i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
2715                             __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
2716         }
2717         qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
2718               LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
2719               LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
2720               LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
2721               LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
2722               LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
2723               LS_64(info->push_idx, I40IWQPC_PPIDX) |
2724               LS_64(info->push_mode_en, I40IWQPC_PMENA);
2725
2726         set_64bit_val(qp_ctx, 8, qp->sq_pa);
2727         set_64bit_val(qp_ctx, 16, qp->rq_pa);
2728
2729         qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2730               LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
2731               LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);
2732
2733         set_64bit_val(qp_ctx,
2734                       128,
2735                       LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));
2736
2737         set_64bit_val(qp_ctx,
2738                       136,
2739                       LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
2740                       LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));
2741
2742         set_64bit_val(qp_ctx,
2743                       168,
2744                       LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
2745         set_64bit_val(qp_ctx,
2746                       176,
2747                       LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
2748                       LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
2749                       LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
2750                       LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
2751
2752         if (info->iwarp_info_valid) {
2753                 qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
2754                        LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
2755
2756                 qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
2757                 set_64bit_val(qp_ctx,
2758                               144,
2759                               LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
2760                               LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
2761                 set_64bit_val(qp_ctx,
2762                               152,
2763                               LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
2764
2765                 set_64bit_val(qp_ctx,
2766                               160,
2767                               LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
2768                               LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
2769                               LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
2770                               LS_64(iw->rd_enable, I40IWQPC_RDOK) |
2771                               LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
2772                               LS_64(iw->bind_en, I40IWQPC_BINDEN) |
2773                               LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
2774                               LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
2775                               LS_64((((vsi->stats_fcn_id_alloc) &&
2776                                       (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
2777                                     I40IWQPC_USESTATSINSTANCE) |
2778                               LS_64(1, I40IWQPC_IWARPMODE) |
2779                               LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
2780                               LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
2781                               LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
2782                               LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
2783                               LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
2784         }
2785         if (info->tcp_info_valid) {
2786                 qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
2787                        LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
2788                        LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
2789                        LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
2790                        LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
2791                        LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
2792                        LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);
2793
2794                 qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
2795                        LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2796                        LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
2797                        LS_64(tcp->tos, I40IWQPC_TOS) |
2798                        LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
2799                        LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);
2800
2801                 qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
2802                 set_64bit_val(qp_ctx,
2803                               32,
2804                               LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
2805                               LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));
2806
2807                 set_64bit_val(qp_ctx,
2808                               40,
2809                               LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
2810                               LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));
2811
2812                 set_64bit_val(qp_ctx,
2813                               48,
2814                               LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
2815                                 LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
2816                                 LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));
2817
2818                 qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
2819                        LS_64(tcp->wscale, I40IWQPC_WSCALE) |
2820                        LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
2821                        LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
2822                        LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
2823                        LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
2824                        LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);
2825
2826                 set_64bit_val(qp_ctx,
2827                               72,
2828                               LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
2829                               LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
2830                 set_64bit_val(qp_ctx,
2831                               80,
2832                               LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
2833                               LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));
2834
2835                 set_64bit_val(qp_ctx,
2836                               88,
2837                               LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
2838                               LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
2839                 set_64bit_val(qp_ctx,
2840                               96,
2841                               LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
2842                               LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
2843                 set_64bit_val(qp_ctx,
2844                               104,
2845                               LS_64(tcp->srtt, I40IWQPC_SRTT) |
2846                               LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
2847                 set_64bit_val(qp_ctx,
2848                               112,
2849                               LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
2850                               LS_64(tcp->cwnd, I40IWQPC_CWND));
2851                 set_64bit_val(qp_ctx,
2852                               120,
2853                               LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
2854                               LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
2855                 set_64bit_val(qp_ctx,
2856                               128,
2857                               LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
2858                               LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
2859                 set_64bit_val(qp_ctx,
2860                               184,
2861                               LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
2862                               LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
2863                 set_64bit_val(qp_ctx,
2864                               192,
2865                               LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
2866                               LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
2867         }
2868
2869         set_64bit_val(qp_ctx, 0, qw0);
2870         set_64bit_val(qp_ctx, 24, qw3);
2871         set_64bit_val(qp_ctx, 56, qw7);
2872
2873         i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST)CTX WQE",
2874                         qp_ctx, I40IW_QP_CTX_SIZE);
2875         return 0;
2876 }
2877
2878 /**
2879  * i40iw_sc_alloc_stag - mr stag alloc
2880  * @dev: sc device struct
2881  * @info: stag info
2882  * @scratch: u64 saved to be used during cqp completion
2883  * @post_sq: flag for cqp db to ring
2884  */
2885 static enum i40iw_status_code i40iw_sc_alloc_stag(
2886                                 struct i40iw_sc_dev *dev,
2887                                 struct i40iw_allocate_stag_info *info,
2888                                 u64 scratch,
2889                                 bool post_sq)
2890 {
2891         u64 *wqe;
2892         struct i40iw_sc_cqp *cqp;
2893         u64 header;
2894         enum i40iw_page_size page_size;
2895
2896         page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
2897         cqp = dev->cqp;
2898         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2899         if (!wqe)
2900                 return I40IW_ERR_RING_FULL;
2901         set_64bit_val(wqe,
2902                       8,
2903                       LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
2904                       LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
2905         set_64bit_val(wqe,
2906                       16,
2907                       LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2908         set_64bit_val(wqe,
2909                       40,
2910                       LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));
2911
2912         header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
2913                  LS_64(1, I40IW_CQPSQ_STAG_MR) |
2914                  LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2915                  LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
2916                  LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
2917                  LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2918                  LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
2919                  LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
2920                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2921
2922         i40iw_insert_wqe_hdr(wqe, header);
2923
2924         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
2925                         wqe, I40IW_CQP_WQE_SIZE * 8);
2926
2927         if (post_sq)
2928                 i40iw_sc_cqp_post_sq(cqp);
2929         return 0;
2930 }
2931
2932 /**
2933  * i40iw_sc_mr_reg_non_shared - non-shared mr registration
2934  * @dev: sc device struct
2935  * @info: mr info
2936  * @scratch: u64 saved to be used during cqp completion
2937  * @post_sq: flag for cqp db to ring
2938  */
2939 static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
2940                                 struct i40iw_sc_dev *dev,
2941                                 struct i40iw_reg_ns_stag_info *info,
2942                                 u64 scratch,
2943                                 bool post_sq)
2944 {
2945         u64 *wqe;
2946         u64 temp;
2947         struct i40iw_sc_cqp *cqp;
2948         u64 header;
2949         u32 pble_obj_cnt;
2950         bool remote_access;
2951         u8 addr_type;
2952         enum i40iw_page_size page_size;
2953
2954         page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
2955         if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
2956                                    I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
2957                 remote_access = true;
2958         else
2959                 remote_access = false;
2960
2961         pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2962
2963         if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
2964                 return I40IW_ERR_INVALID_PBLE_INDEX;
2965
2966         cqp = dev->cqp;
2967         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2968         if (!wqe)
2969                 return I40IW_ERR_RING_FULL;
2970
2971         temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
2972         set_64bit_val(wqe, 0, temp);
2973
2974         set_64bit_val(wqe,
2975                       8,
2976                       LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
2977                       LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2978
2979         set_64bit_val(wqe,
2980                       16,
2981                       LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
2982                       LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2983         if (!info->chunk_size) {
2984                 set_64bit_val(wqe, 32, info->reg_addr_pa);
2985                 set_64bit_val(wqe, 48, 0);
2986         } else {
2987                 set_64bit_val(wqe, 32, 0);
2988                 set_64bit_val(wqe, 48, info->first_pm_pbl_index);
2989         }
2990         set_64bit_val(wqe, 40, info->hmc_fcn_index);
2991         set_64bit_val(wqe, 56, 0);
2992
2993         addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
2994         header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
2995                  LS_64(1, I40IW_CQPSQ_STAG_MR) |
2996                  LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
2997                  LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
2998                  LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2999                  LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
3000                  LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
3001                  LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
3002                  LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
3003                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3004
3005         i40iw_insert_wqe_hdr(wqe, header);
3006
3007         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
3008                         wqe, I40IW_CQP_WQE_SIZE * 8);
3009
3010         if (post_sq)
3011                 i40iw_sc_cqp_post_sq(cqp);
3012         return 0;
3013 }
3014
3015 /**
3016  * i40iw_sc_mr_reg_shared - registered shared memory region
3017  * @dev: sc device struct
3018  * @info: info for shared memory registeration
3019  * @scratch: u64 saved to be used during cqp completion
3020  * @post_sq: flag for cqp db to ring
3021  */
3022 static enum i40iw_status_code i40iw_sc_mr_reg_shared(
3023                                         struct i40iw_sc_dev *dev,
3024                                         struct i40iw_register_shared_stag *info,
3025                                         u64 scratch,
3026                                         bool post_sq)
3027 {
3028         u64 *wqe;
3029         struct i40iw_sc_cqp *cqp;
3030         u64 temp, va64, fbo, header;
3031         u32 va32;
3032         bool remote_access;
3033         u8 addr_type;
3034
3035         if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
3036                                    I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
3037                 remote_access = true;
3038         else
3039                 remote_access = false;
3040         cqp = dev->cqp;
3041         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3042         if (!wqe)
3043                 return I40IW_ERR_RING_FULL;
3044         va64 = (uintptr_t)(info->va);
3045         va32 = (u32)(va64 & 0x00000000FFFFFFFF);
3046         fbo = (u64)(va32 & (4096 - 1));
3047
3048         set_64bit_val(wqe,
3049                       0,
3050                       (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));
3051
3052         set_64bit_val(wqe,
3053                       8,
3054                       LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
3055         temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
3056                LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
3057                LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
3058         set_64bit_val(wqe, 16, temp);
3059
3060         addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
3061         header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
3062                  LS_64(1, I40IW_CQPSQ_STAG_MR) |
3063                  LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
3064                  LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
3065                  LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
3066                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3067
3068         i40iw_insert_wqe_hdr(wqe, header);
3069
3070         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
3071                         wqe, I40IW_CQP_WQE_SIZE * 8);
3072
3073         if (post_sq)
3074                 i40iw_sc_cqp_post_sq(cqp);
3075         return 0;
3076 }
3077
3078 /**
3079  * i40iw_sc_dealloc_stag - deallocate stag
3080  * @dev: sc device struct
3081  * @info: dealloc stag info
3082  * @scratch: u64 saved to be used during cqp completion
3083  * @post_sq: flag for cqp db to ring
3084  */
3085 static enum i40iw_status_code i40iw_sc_dealloc_stag(
3086                                         struct i40iw_sc_dev *dev,
3087                                         struct i40iw_dealloc_stag_info *info,
3088                                         u64 scratch,
3089                                         bool post_sq)
3090 {
3091         u64 header;
3092         u64 *wqe;
3093         struct i40iw_sc_cqp *cqp;
3094
3095         cqp = dev->cqp;
3096         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3097         if (!wqe)
3098                 return I40IW_ERR_RING_FULL;
3099         set_64bit_val(wqe,
3100                       8,
3101                       LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
3102         set_64bit_val(wqe,
3103                       16,
3104                       LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
3105
3106         header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
3107                  LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
3108                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3109
3110         i40iw_insert_wqe_hdr(wqe, header);
3111
3112         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
3113                         wqe, I40IW_CQP_WQE_SIZE * 8);
3114
3115         if (post_sq)
3116                 i40iw_sc_cqp_post_sq(cqp);
3117         return 0;
3118 }
3119
3120 /**
3121  * i40iw_sc_query_stag - query hardware for stag
3122  * @dev: sc device struct
3123  * @scratch: u64 saved to be used during cqp completion
3124  * @stag_index: stag index for query
3125  * @post_sq: flag for cqp db to ring
3126  */
3127 static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
3128                                                   u64 scratch,
3129                                                   u32 stag_index,
3130                                                   bool post_sq)
3131 {
3132         u64 header;
3133         u64 *wqe;
3134         struct i40iw_sc_cqp *cqp;
3135
3136         cqp = dev->cqp;
3137         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3138         if (!wqe)
3139                 return I40IW_ERR_RING_FULL;
3140         set_64bit_val(wqe,
3141                       16,
3142                       LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));
3143
3144         header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
3145                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3146
3147         i40iw_insert_wqe_hdr(wqe, header);
3148
3149         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
3150                         wqe, I40IW_CQP_WQE_SIZE * 8);
3151
3152         if (post_sq)
3153                 i40iw_sc_cqp_post_sq(cqp);
3154         return 0;
3155 }
3156
3157 /**
3158  * i40iw_sc_mw_alloc - mw allocate
3159  * @dev: sc device struct
3160  * @scratch: u64 saved to be used during cqp completion
3161  * @mw_stag_index:stag index
3162  * @pd_id: pd is for this mw
3163  * @post_sq: flag for cqp db to ring
3164  */
3165 static enum i40iw_status_code i40iw_sc_mw_alloc(
3166                                         struct i40iw_sc_dev *dev,
3167                                         u64 scratch,
3168                                         u32 mw_stag_index,
3169                                         u16 pd_id,
3170                                         bool post_sq)
3171 {
3172         u64 header;
3173         struct i40iw_sc_cqp *cqp;
3174         u64 *wqe;
3175
3176         cqp = dev->cqp;
3177         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3178         if (!wqe)
3179                 return I40IW_ERR_RING_FULL;
3180         set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
3181         set_64bit_val(wqe,
3182                       16,
3183                       LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));
3184
3185         header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
3186                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3187
3188         i40iw_insert_wqe_hdr(wqe, header);
3189
3190         i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
3191                         wqe, I40IW_CQP_WQE_SIZE * 8);
3192
3193         if (post_sq)
3194                 i40iw_sc_cqp_post_sq(cqp);
3195         return 0;
3196 }
3197
3198 /**
3199  * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
3200  * @qp: sc qp struct
3201  * @info: fast mr info
3202  * @post_sq: flag for cqp db to ring
3203  */
3204 enum i40iw_status_code i40iw_sc_mr_fast_register(
3205                                 struct i40iw_sc_qp *qp,
3206                                 struct i40iw_fast_reg_stag_info *info,
3207                                 bool post_sq)
3208 {
3209         u64 temp, header;
3210         u64 *wqe;
3211         u32 wqe_idx;
3212         enum i40iw_page_size page_size;
3213
3214         page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
3215         wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
3216                                          0, info->wr_id);
3217         if (!wqe)
3218                 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3219
3220         i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
3221                     __func__, info->wr_id, wqe_idx,
3222                     &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
3223         temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
3224         set_64bit_val(wqe, 0, temp);
3225
3226         temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);
3227         set_64bit_val(wqe,
3228                       8,
3229                       LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |
3230                       LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));
3231
3232         set_64bit_val(wqe,
3233                       16,
3234                       info->total_len |
3235                       LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));
3236
3237         header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |
3238                  LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
3239                  LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
3240                  LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
3241                  LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
3242                  LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
3243                  LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
3244                  LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
3245                  LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
3246                  LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
3247                  LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3248
3249         i40iw_insert_wqe_hdr(wqe, header);
3250
3251         i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE",
3252                         wqe, I40IW_QP_WQE_MIN_SIZE);
3253
3254         if (post_sq)
3255                 i40iw_qp_post_wr(&qp->qp_uk);
3256         return 0;
3257 }
3258
3259 /**
3260  * i40iw_sc_send_lsmm - send last streaming mode message
3261  * @qp: sc qp struct
3262  * @lsmm_buf: buffer with lsmm message
3263  * @size: size of lsmm buffer
3264  * @stag: stag of lsmm buffer
3265  */
3266 static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
3267                                void *lsmm_buf,
3268                                u32 size,
3269                                i40iw_stag stag)
3270 {
3271         u64 *wqe;
3272         u64 header;
3273         struct i40iw_qp_uk *qp_uk;
3274
3275         qp_uk = &qp->qp_uk;
3276         wqe = qp_uk->sq_base->elem;
3277
3278         set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3279
3280         set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
3281
3282         set_64bit_val(wqe, 16, 0);
3283
3284         header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3285                  LS_64(1, I40IWQPSQ_STREAMMODE) |
3286                  LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3287                  LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3288
3289         i40iw_insert_wqe_hdr(wqe, header);
3290
3291         i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
3292                         wqe, I40IW_QP_WQE_MIN_SIZE);
3293 }
3294
3295 /**
3296  * i40iw_sc_send_lsmm_nostag - for privilege qp
3297  * @qp: sc qp struct
3298  * @lsmm_buf: buffer with lsmm message
3299  * @size: size of lsmm buffer
3300  */
3301 static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
3302                                       void *lsmm_buf,
3303                                       u32 size)
3304 {
3305         u64 *wqe;
3306         u64 header;
3307         struct i40iw_qp_uk *qp_uk;
3308
3309         qp_uk = &qp->qp_uk;
3310         wqe = qp_uk->sq_base->elem;
3311
3312         set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3313
3314         set_64bit_val(wqe, 8, size);
3315
3316         set_64bit_val(wqe, 16, 0);
3317
3318         header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3319                  LS_64(1, I40IWQPSQ_STREAMMODE) |
3320                  LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3321                  LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3322
3323         i40iw_insert_wqe_hdr(wqe, header);
3324
3325         i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
3326                         wqe, I40IW_QP_WQE_MIN_SIZE);
3327 }
3328
3329 /**
3330  * i40iw_sc_send_rtt - send last read0 or write0
3331  * @qp: sc qp struct
3332  * @read: Do read0 or write0
3333  */
3334 static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
3335 {
3336         u64 *wqe;
3337         u64 header;
3338         struct i40iw_qp_uk *qp_uk;
3339
3340         qp_uk = &qp->qp_uk;
3341         wqe = qp_uk->sq_base->elem;
3342
3343         set_64bit_val(wqe, 0, 0);
3344         set_64bit_val(wqe, 8, 0);
3345         set_64bit_val(wqe, 16, 0);
3346         if (read) {
3347                 header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
3348                          LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
3349                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3350                 set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
3351         } else {
3352                 header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
3353                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3354         }
3355
3356         i40iw_insert_wqe_hdr(wqe, header);
3357
3358         i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
3359                         wqe, I40IW_QP_WQE_MIN_SIZE);
3360 }
3361
3362 /**
3363  * i40iw_sc_post_wqe0 - send wqe with opcode
3364  * @qp: sc qp struct
3365  * @opcode: opcode to use for wqe0
3366  */
3367 static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
3368 {
3369         u64 *wqe;
3370         u64 header;
3371         struct i40iw_qp_uk *qp_uk;
3372
3373         qp_uk = &qp->qp_uk;
3374         wqe = qp_uk->sq_base->elem;
3375
3376         if (!wqe)
3377                 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3378         switch (opcode) {
3379         case I40IWQP_OP_NOP:
3380                 set_64bit_val(wqe, 0, 0);
3381                 set_64bit_val(wqe, 8, 0);
3382                 set_64bit_val(wqe, 16, 0);
3383                 header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
3384                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3385
3386                 i40iw_insert_wqe_hdr(wqe, header);
3387                 break;
3388         case I40IWQP_OP_RDMA_SEND:
3389                 set_64bit_val(wqe, 0, 0);
3390                 set_64bit_val(wqe, 8, 0);
3391                 set_64bit_val(wqe, 16, 0);
3392                 header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3393                          LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
3394                          LS_64(1, I40IWQPSQ_STREAMMODE) |
3395                          LS_64(1, I40IWQPSQ_WAITFORRCVPDU);
3396
3397                 i40iw_insert_wqe_hdr(wqe, header);
3398                 break;
3399         default:
3400                 i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
3401                             __func__);
3402                 break;
3403         }
3404         return 0;
3405 }
3406
3407 /**
3408  * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
3409  * @dev : ptr to i40iw_dev struct
3410  * @hmc_fn_id: hmc function id
3411  */
3412 enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
3413 {
3414         struct i40iw_hmc_info *hmc_info;
3415         struct i40iw_dma_mem query_fpm_mem;
3416         struct i40iw_virt_mem virt_mem;
3417         struct i40iw_vfdev *vf_dev = NULL;
3418         u32 mem_size;
3419         enum i40iw_status_code ret_code = 0;
3420         bool poll_registers = true;
3421         u16 iw_vf_idx;
3422         u8 wait_type;
3423
3424         if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
3425             (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
3426                 return I40IW_ERR_INVALID_HMCFN_ID;
3427
3428         i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
3429                     dev->hmc_fn_id);
3430         if (hmc_fn_id == dev->hmc_fn_id) {
3431                 hmc_info = dev->hmc_info;
3432                 query_fpm_mem.pa = dev->fpm_query_buf_pa;
3433                 query_fpm_mem.va = dev->fpm_query_buf;
3434         } else {
3435                 vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
3436                 if (!vf_dev)
3437                         return I40IW_ERR_INVALID_VF_ID;
3438
3439                 hmc_info = &vf_dev->hmc_info;
3440                 iw_vf_idx = vf_dev->iw_vf_idx;
3441                 i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
3442                             hmc_info, hmc_info->hmc_obj);
3443                 if (!vf_dev->fpm_query_buf) {
3444                         if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
3445                                 ret_code = i40iw_alloc_query_fpm_buf(dev,
3446                                                                      &dev->vf_fpm_query_buf[iw_vf_idx]);
3447                                 if (ret_code)
3448                                         return ret_code;
3449                         }
3450                         vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
3451                         vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
3452                 }
3453                 query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
3454                 query_fpm_mem.va = vf_dev->fpm_query_buf;
3455                 /**
3456                  * It is HARDWARE specific:
3457                  * this call is done by PF for VF and
3458                  * i40iw_sc_query_fpm_values needs ccq poll
3459                  * because PF ccq is already created.
3460                  */
3461                 poll_registers = false;
3462         }
3463
3464         hmc_info->hmc_fn_id = hmc_fn_id;
3465
3466         if (hmc_fn_id != dev->hmc_fn_id) {
3467                 ret_code =
3468                         i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
3469         } else {
3470                 wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
3471                             (u8)I40IW_CQP_WAIT_POLL_CQ;
3472
3473                 ret_code = i40iw_sc_query_fpm_values(
3474                                         dev->cqp,
3475                                         0,
3476                                         hmc_info->hmc_fn_id,
3477                                         &query_fpm_mem,
3478                                         true,
3479                                         wait_type);
3480         }
3481         if (ret_code)
3482                 return ret_code;
3483
3484         /* parse the fpm_query_buf and fill hmc obj info */
3485         ret_code =
3486                 i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
3487                                              hmc_info,
3488                                              &dev->hmc_fpm_misc);
3489         if (ret_code)
3490                 return ret_code;
3491         i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
3492                         query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);
3493
3494         if (hmc_fn_id != dev->hmc_fn_id) {
3495                 i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
3496
3497                 /* parse the fpm_commit_buf and fill hmc obj info */
3498                 i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);
3499                 mem_size = sizeof(struct i40iw_hmc_sd_entry) *
3500                            (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
3501                 ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
3502                 if (ret_code)
3503                         return ret_code;
3504                 hmc_info->sd_table.sd_entry = virt_mem.va;
3505         }
3506
3507         return ret_code;
3508 }
3509
3510 /**
3511  * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
3512  * populates fpm base address in hmc_info
3513  * @dev : ptr to i40iw_dev struct
3514  * @hmc_fn_id: hmc function id
3515  */
3516 static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
3517                                                         u8 hmc_fn_id)
3518 {
3519         struct i40iw_hmc_info *hmc_info;
3520         struct i40iw_hmc_obj_info *obj_info;
3521         u64 *buf;
3522         struct i40iw_dma_mem commit_fpm_mem;
3523         u32 i, j;
3524         enum i40iw_status_code ret_code = 0;
3525         bool poll_registers = true;
3526         u8 wait_type;
3527
3528         if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
3529             (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
3530                 return I40IW_ERR_INVALID_HMCFN_ID;
3531
3532         if (hmc_fn_id == dev->hmc_fn_id) {
3533                 hmc_info = dev->hmc_info;
3534         } else {
3535                 hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
3536                 poll_registers = false;
3537         }
3538         if (!hmc_info)
3539                 return I40IW_ERR_BAD_PTR;
3540
3541         obj_info = hmc_info->hmc_obj;
3542         buf = dev->fpm_commit_buf;
3543
3544         /* copy cnt values in commit buf */
3545         for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
3546              i++, j += 8)
3547                 set_64bit_val(buf, j, (u64)obj_info[i].cnt);
3548
3549         set_64bit_val(buf, 40, 0);   /* APBVT rsvd */
3550
3551         commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
3552         commit_fpm_mem.va = dev->fpm_commit_buf;
3553         wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
3554                         (u8)I40IW_CQP_WAIT_POLL_CQ;
3555         ret_code = i40iw_sc_commit_fpm_values(
3556                                         dev->cqp,
3557                                         0,
3558                                         hmc_info->hmc_fn_id,
3559                                         &commit_fpm_mem,
3560                                         true,
3561                                         wait_type);
3562
3563         /* parse the fpm_commit_buf and fill hmc obj info */
3564         if (!ret_code)
3565                 ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,
3566                                                          hmc_info->hmc_obj,
3567                                                          &hmc_info->sd_table.sd_cnt);
3568
3569         i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
3570                         commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);
3571
3572         return ret_code;
3573 }
3574
3575 /**
3576  * cqp_sds_wqe_fill - fill cqp wqe doe sd
3577  * @cqp: struct for cqp hw
3578  * @info; sd info for wqe
3579  * @scratch: u64 saved to be used during cqp completion
3580  */
3581 static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
3582                                                struct i40iw_update_sds_info *info,
3583                                                u64 scratch)
3584 {
3585         u64 data;
3586         u64 header;
3587         u64 *wqe;
3588         int mem_entries, wqe_entries;
3589         struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
3590
3591         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3592         if (!wqe)
3593                 return I40IW_ERR_RING_FULL;
3594
3595         I40IW_CQP_INIT_WQE(wqe);
3596         wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
3597         mem_entries = info->cnt - wqe_entries;
3598
3599         header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
3600                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
3601                  LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
3602
3603         if (mem_entries) {
3604                 memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
3605                 data = sdbuf->pa;
3606         } else {
3607                 data = 0;
3608         }
3609         data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);
3610
3611         set_64bit_val(wqe, 16, data);
3612
3613         switch (wqe_entries) {
3614         case 3:
3615                 set_64bit_val(wqe, 48,
3616                               (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
3617                                         LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
3618
3619                 set_64bit_val(wqe, 56, info->entry[2].data);
3620                 /* fallthrough */
3621         case 2:
3622                 set_64bit_val(wqe, 32,
3623                               (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
3624                                         LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
3625
3626                 set_64bit_val(wqe, 40, info->entry[1].data);
3627                 /* fallthrough */
3628         case 1:
3629                 set_64bit_val(wqe, 0,
3630                               LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));
3631
3632                 set_64bit_val(wqe, 8, info->entry[0].data);
3633                 break;
3634         default:
3635                 break;
3636         }
3637
3638         i40iw_insert_wqe_hdr(wqe, header);
3639
3640         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
3641                         wqe, I40IW_CQP_WQE_SIZE * 8);
3642         return 0;
3643 }
3644
3645 /**
3646  * i40iw_update_pe_sds - cqp wqe for sd
3647  * @dev: ptr to i40iw_dev struct
3648  * @info: sd info for sd's
3649  * @scratch: u64 saved to be used during cqp completion
3650  */
3651 static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
3652                                                   struct i40iw_update_sds_info *info,
3653                                                   u64 scratch)
3654 {
3655         struct i40iw_sc_cqp *cqp = dev->cqp;
3656         enum i40iw_status_code ret_code;
3657
3658         ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
3659         if (!ret_code)
3660                 i40iw_sc_cqp_post_sq(cqp);
3661
3662         return ret_code;
3663 }
3664
3665 /**
3666  * i40iw_update_sds_noccq - update sd before ccq created
3667  * @dev: sc device struct
3668  * @info: sd info for sd's
3669  */
3670 enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
3671                                               struct i40iw_update_sds_info *info)
3672 {
3673         u32 error, val, tail;
3674         struct i40iw_sc_cqp *cqp = dev->cqp;
3675         enum i40iw_status_code ret_code;
3676
3677         ret_code = cqp_sds_wqe_fill(cqp, info, 0);
3678         if (ret_code)
3679                 return ret_code;
3680         i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3681         if (error)
3682                 return I40IW_ERR_CQP_COMPL_ERROR;
3683
3684         i40iw_sc_cqp_post_sq(cqp);
3685         ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
3686
3687         return ret_code;
3688 }
3689
3690 /**
3691  * i40iw_sc_suspend_qp - suspend qp for param change
3692  * @cqp: struct for cqp hw
3693  * @qp: sc qp struct
3694  * @scratch: u64 saved to be used during cqp completion
3695  */
3696 enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
3697                                            struct i40iw_sc_qp *qp,
3698                                            u64 scratch)
3699 {
3700         u64 header;
3701         u64 *wqe;
3702
3703         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3704         if (!wqe)
3705                 return I40IW_ERR_RING_FULL;
3706         header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
3707                  LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
3708                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3709
3710         i40iw_insert_wqe_hdr(wqe, header);
3711
3712         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
3713                         wqe, I40IW_CQP_WQE_SIZE * 8);
3714
3715         i40iw_sc_cqp_post_sq(cqp);
3716         return 0;
3717 }
3718
3719 /**
3720  * i40iw_sc_resume_qp - resume qp after suspend
3721  * @cqp: struct for cqp hw
3722  * @qp: sc qp struct
3723  * @scratch: u64 saved to be used during cqp completion
3724  */
3725 enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
3726                                           struct i40iw_sc_qp *qp,
3727                                           u64 scratch)
3728 {
3729         u64 header;
3730         u64 *wqe;
3731
3732         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3733         if (!wqe)
3734                 return I40IW_ERR_RING_FULL;
3735         set_64bit_val(wqe,
3736                       16,
3737                         LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));
3738
3739         header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
3740                  LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
3741                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3742
3743         i40iw_insert_wqe_hdr(wqe, header);
3744
3745         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
3746                         wqe, I40IW_CQP_WQE_SIZE * 8);
3747
3748         i40iw_sc_cqp_post_sq(cqp);
3749         return 0;
3750 }
3751
3752 /**
3753  * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
3754  * @cqp: struct for cqp hw
3755  * @scratch: u64 saved to be used during cqp completion
3756  * @hmc_fn_id: hmc function id
3757  * @post_sq: flag for cqp db to ring
3758  * @poll_registers: flag to poll register for cqp completion
3759  */
3760 enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
3761                                         struct i40iw_sc_cqp *cqp,
3762                                         u64 scratch,
3763                                         u8 hmc_fn_id,
3764                                         bool post_sq,
3765                                         bool poll_registers)
3766 {
3767         u64 header;
3768         u64 *wqe;
3769         u32 tail, val, error;
3770         enum i40iw_status_code ret_code = 0;
3771
3772         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3773         if (!wqe)
3774                 return I40IW_ERR_RING_FULL;
3775         set_64bit_val(wqe,
3776                       16,
3777                       LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
3778
3779         header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
3780                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3781
3782         i40iw_insert_wqe_hdr(wqe, header);
3783
3784         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
3785                         wqe, I40IW_CQP_WQE_SIZE * 8);
3786         i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3787         if (error) {
3788                 ret_code = I40IW_ERR_CQP_COMPL_ERROR;
3789                 return ret_code;
3790         }
3791         if (post_sq) {
3792                 i40iw_sc_cqp_post_sq(cqp);
3793                 if (poll_registers)
3794                         /* check for cqp sq tail update */
3795                         ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
3796                 else
3797                         ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
3798                                                                  I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
3799                                                                  NULL);
3800         }
3801
3802         return ret_code;
3803 }
3804
3805 /**
3806  * i40iw_ring_full - check if cqp ring is full
3807  * @cqp: struct for cqp hw
3808  */
3809 static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
3810 {
3811         return I40IW_RING_FULL_ERR(cqp->sq_ring);
3812 }
3813
3814 /**
3815  * i40iw_est_sd - returns approximate number of SDs for HMC
3816  * @dev: sc device struct
3817  * @hmc_info: hmc structure, size and count for HMC objects
3818  */
3819 static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)
3820 {
3821         int i;
3822         u64 size = 0;
3823         u64 sd;
3824
3825         for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)
3826                 size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;
3827
3828         if (dev->is_pf)
3829                 size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3830
3831         if (size & 0x1FFFFF)
3832                 sd = (size >> 21) + 1; /* add 1 for remainder */
3833         else
3834                 sd = size >> 21;
3835
3836         if (!dev->is_pf) {
3837                 /* 2MB alignment for VF PBLE HMC */
3838                 size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3839                 if (size & 0x1FFFFF)
3840                         sd += (size >> 21) + 1; /* add 1 for remainder */
3841                 else
3842                         sd += size >> 21;
3843         }
3844
3845         return sd;
3846 }
3847
/**
 * i40iw_config_fpm_values - configure HMC objects
 * @dev: sc device struct
 * @qp_count: desired qp count
 *
 * Queries firmware for the per-object maximum counts, then iteratively
 * shrinks the wanted QP/MR/PBLE counts until the total backing store
 * fits within the available segment descriptors (SDs). Commits the
 * final counts to hardware and allocates the host SD entry table.
 */
enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
{
	struct i40iw_virt_mem virt_mem;
	u32 i, mem_size;
	u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
	u32 powerof2;
	u64 sd_needed;
	u32 loop_count = 0;

	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
	enum i40iw_status_code ret_code = 0;

	hmc_info = dev->hmc_info;
	hmc_fpm_misc = &dev->hmc_fpm_misc;

	/* read per-object maxima from firmware into hmc_info */
	ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "i40iw_sc_init_iw_hmc returned error_code = %d\n",
			    ret_code);
		return ret_code;
	}

	/* start from the firmware maxima for every object type */
	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
	sd_needed = i40iw_est_sd(dev, hmc_info);
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
		    __func__, sd_needed, hmc_info->first_sd_index);
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: sd count %d where max sd is %d\n",
		    __func__, hmc_info->sd_table.sd_cnt,
		    hmc_fpm_misc->max_sds);

	qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
	qpwantedoriginal = qpwanted;
	mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
	pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;

	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
		    qp_count, hmc_fpm_misc->max_sds,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);

	/* shrink the wanted counts until the estimate fits in max_sds */
	do {
		++loop_count;
		hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
			min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
		hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
		hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
					qpwanted * hmc_fpm_misc->ht_multiplier;
		hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
		hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
		hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;

		hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
			((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;

		/* How much memory is needed for all the objects. */
		sd_needed = i40iw_est_sd(dev, hmc_info);
		/* every 10th pass (while qpwanted is still above 2/3 of the
		 * request) or past 1000 passes, cut qpwanted further
		 */
		if ((loop_count > 1000) ||
		    ((!(loop_count % 10)) &&
		    (qpwanted > qpwantedoriginal * 2 / 3))) {
			if (qpwanted > FPM_MULTIPLIER) {
				qpwanted -= FPM_MULTIPLIER;
				/* drop to the next lower power of two */
				powerof2 = 1;
				while (powerof2 < qpwanted)
					powerof2 *= 2;
				powerof2 /= 2;
				qpwanted = powerof2;
			} else {
				qpwanted /= 2;
			}
		}
		if (mrwanted > FPM_MULTIPLIER * 10)
			mrwanted -= FPM_MULTIPLIER * 10;
		if (pblewanted > FPM_MULTIPLIER * 1000)
			pblewanted -= FPM_MULTIPLIER * 1000;
	} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);

	sd_needed = i40iw_est_sd(dev, hmc_info);

	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
		    loop_count, sd_needed,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);

	/* commit the negotiated counts to the hardware FPM */
	ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "configure_iw_fpm returned error_code[x%08X]\n",
			    i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
		return ret_code;
	}

	/* allocate the host-side SD entry table sized for all SDs */
	mem_size = sizeof(struct i40iw_hmc_sd_entry) *
		   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
	ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: failed to allocate memory for sd_entry buffer\n",
			    __func__);
		return ret_code;
	}
	hmc_info->sd_table.sd_entry = virt_mem.va;

	return ret_code;
}
3979
/**
 * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
 * @dev: rdma device
 * @pcmdinfo: cqp command info
 *
 * Bumps the per-command statistics counter, then dispatches on
 * pcmdinfo->cqp_cmd to the matching i40iw_sc_* post routine using the
 * union member for that command. Returns the status of the posted
 * command, or I40IW_NOT_SUPPORTED for an unknown opcode.
 */
static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
						 struct cqp_commands_info *pcmdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_dma_mem values_mem;

	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
	switch (pcmdinfo->cqp_cmd) {
	case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_del_local_mac_ipaddr_entry(
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_DESTROY:
		status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
					      pcmdinfo->in.u.ceq_destroy.scratch,
					      pcmdinfo->post_sq);
		break;
	case OP_AEQ_DESTROY:
		status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
					      pcmdinfo->in.u.aeq_destroy.scratch,
					      pcmdinfo->post_sq);

		break;
	case OP_DELETE_ARP_CACHE_ENTRY:
		status = i40iw_sc_del_arp_cache_entry(
				pcmdinfo->in.u.del_arp_cache_entry.cqp,
				pcmdinfo->in.u.del_arp_cache_entry.scratch,
				pcmdinfo->in.u.del_arp_cache_entry.arp_index,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_APBVT_ENTRY:
		status = i40iw_sc_manage_apbvt_entry(
				pcmdinfo->in.u.manage_apbvt_entry.cqp,
				&pcmdinfo->in.u.manage_apbvt_entry.info,
				pcmdinfo->in.u.manage_apbvt_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_CREATE:
		status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
					     pcmdinfo->in.u.ceq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_AEQ_CREATE:
		status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
					     pcmdinfo->in.u.aeq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_alloc_local_mac_ipaddr_entry(
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_add_local_mac_ipaddr_entry(
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
				&pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_QHASH_TABLE_ENTRY:
		status = i40iw_sc_manage_qhash_table_entry(
				pcmdinfo->in.u.manage_qhash_table_entry.cqp,
				&pcmdinfo->in.u.manage_qhash_table_entry.info,
				pcmdinfo->in.u.manage_qhash_table_entry.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_MODIFY:
		status = i40iw_sc_qp_modify(
				pcmdinfo->in.u.qp_modify.qp,
				&pcmdinfo->in.u.qp_modify.info,
				pcmdinfo->in.u.qp_modify.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_UPLOAD_CONTEXT:
		status = i40iw_sc_qp_upload_context(
				pcmdinfo->in.u.qp_upload_context.dev,
				&pcmdinfo->in.u.qp_upload_context.info,
				pcmdinfo->in.u.qp_upload_context.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_CQ_CREATE:
		status = i40iw_sc_cq_create(
				pcmdinfo->in.u.cq_create.cq,
				pcmdinfo->in.u.cq_create.scratch,
				pcmdinfo->in.u.cq_create.check_overflow,
				pcmdinfo->post_sq);
		break;
	case OP_CQ_DESTROY:
		status = i40iw_sc_cq_destroy(
				pcmdinfo->in.u.cq_destroy.cq,
				pcmdinfo->in.u.cq_destroy.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_QP_CREATE:
		status = i40iw_sc_qp_create(
				pcmdinfo->in.u.qp_create.qp,
				&pcmdinfo->in.u.qp_create.info,
				pcmdinfo->in.u.qp_create.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_DESTROY:
		status = i40iw_sc_qp_destroy(
				pcmdinfo->in.u.qp_destroy.qp,
				pcmdinfo->in.u.qp_destroy.scratch,
				pcmdinfo->in.u.qp_destroy.remove_hash_idx,
				pcmdinfo->in.u.qp_destroy.
				ignore_mw_bnd,
				pcmdinfo->post_sq);

		break;
	case OP_ALLOC_STAG:
		status = i40iw_sc_alloc_stag(
				pcmdinfo->in.u.alloc_stag.dev,
				&pcmdinfo->in.u.alloc_stag.info,
				pcmdinfo->in.u.alloc_stag.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MR_REG_NON_SHARED:
		status = i40iw_sc_mr_reg_non_shared(
				pcmdinfo->in.u.mr_reg_non_shared.dev,
				&pcmdinfo->in.u.mr_reg_non_shared.info,
				pcmdinfo->in.u.mr_reg_non_shared.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_DEALLOC_STAG:
		status = i40iw_sc_dealloc_stag(
				pcmdinfo->in.u.dealloc_stag.dev,
				&pcmdinfo->in.u.dealloc_stag.info,
				pcmdinfo->in.u.dealloc_stag.scratch,
				pcmdinfo->post_sq);

		break;
	case OP_MW_ALLOC:
		status = i40iw_sc_mw_alloc(
				pcmdinfo->in.u.mw_alloc.dev,
				pcmdinfo->in.u.mw_alloc.scratch,
				pcmdinfo->in.u.mw_alloc.mw_stag_index,
				pcmdinfo->in.u.mw_alloc.pd_id,
				pcmdinfo->post_sq);

		break;
	case OP_QP_FLUSH_WQES:
		status = i40iw_sc_qp_flush_wqes(
				pcmdinfo->in.u.qp_flush_wqes.qp,
				&pcmdinfo->in.u.qp_flush_wqes.info,
				pcmdinfo->in.u.qp_flush_wqes.
				scratch, pcmdinfo->post_sq);
		break;
	case OP_ADD_ARP_CACHE_ENTRY:
		status = i40iw_sc_add_arp_cache_entry(
				pcmdinfo->in.u.add_arp_cache_entry.cqp,
				&pcmdinfo->in.u.add_arp_cache_entry.info,
				pcmdinfo->in.u.add_arp_cache_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_PUSH_PAGE:
		status = i40iw_sc_manage_push_page(
				pcmdinfo->in.u.manage_push_page.cqp,
				&pcmdinfo->in.u.manage_push_page.info,
				pcmdinfo->in.u.manage_push_page.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_UPDATE_PE_SDS:
		/* case I40IW_CQP_OP_UPDATE_PE_SDS */
		status = i40iw_update_pe_sds(
				pcmdinfo->in.u.update_pe_sds.dev,
				&pcmdinfo->in.u.update_pe_sds.info,
				pcmdinfo->in.u.update_pe_sds.
				scratch);

		break;
	case OP_MANAGE_HMC_PM_FUNC_TABLE:
		status = i40iw_sc_manage_hmc_pm_func_table(
				pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
				pcmdinfo->in.u.manage_hmc_pm.scratch,
				(u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
				pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
				true);
		break;
	case OP_SUSPEND:
		status = i40iw_sc_suspend_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_RESUME:
		status = i40iw_sc_resume_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_MANAGE_VF_PBLE_BP:
		status = i40iw_manage_vf_pble_bp(
				pcmdinfo->in.u.manage_vf_pble_bp.cqp,
				&pcmdinfo->in.u.manage_vf_pble_bp.info,
				pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
		break;
	case OP_QUERY_FPM_VALUES:
		/* pack the caller-supplied DMA buffer for the query */
		values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
		status = i40iw_sc_query_fpm_values(
				pcmdinfo->in.u.query_fpm_values.cqp,
				pcmdinfo->in.u.query_fpm_values.scratch,
				pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
				&values_mem, true, I40IW_CQP_WAIT_EVENT);
		break;
	case OP_COMMIT_FPM_VALUES:
		/* pack the caller-supplied DMA buffer for the commit */
		values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
		status = i40iw_sc_commit_fpm_values(
				pcmdinfo->in.u.commit_fpm_values.cqp,
				pcmdinfo->in.u.commit_fpm_values.scratch,
				pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
				&values_mem,
				true,
				I40IW_CQP_WAIT_EVENT);
		break;
	default:
		status = I40IW_NOT_SUPPORTED;
		break;
	}

	return status;
}
4219
4220 /**
4221  * i40iw_process_cqp_cmd - process all cqp commands
4222  * @dev: sc device struct
4223  * @pcmdinfo: cqp command info
4224  */
4225 enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
4226                                              struct cqp_commands_info *pcmdinfo)
4227 {
4228         enum i40iw_status_code status = 0;
4229         unsigned long flags;
4230
4231         spin_lock_irqsave(&dev->cqp_lock, flags);
4232         if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
4233                 status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4234         else
4235                 list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
4236         spin_unlock_irqrestore(&dev->cqp_lock, flags);
4237         return status;
4238 }
4239
4240 /**
4241  * i40iw_process_bh - called from tasklet for cqp list
4242  * @dev: sc device struct
4243  */
4244 enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
4245 {
4246         enum i40iw_status_code status = 0;
4247         struct cqp_commands_info *pcmdinfo;
4248         unsigned long flags;
4249
4250         spin_lock_irqsave(&dev->cqp_lock, flags);
4251         while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
4252                 pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
4253
4254                 status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4255                 if (status)
4256                         break;
4257         }
4258         spin_unlock_irqrestore(&dev->cqp_lock, flags);
4259         return status;
4260 }
4261
4262 /**
4263  * i40iw_iwarp_opcode - determine if incoming is rdma layer
4264  * @info: aeq info for the packet
4265  * @pkt: packet for error
4266  */
4267 static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
4268 {
4269         __be16 *mpa;
4270         u32 opcode = 0xffffffff;
4271
4272         if (info->q2_data_written) {
4273                 mpa = (__be16 *)pkt;
4274                 opcode = ntohs(mpa[1]) & 0xf;
4275         }
4276         return opcode;
4277 }
4278
4279 /**
4280  * i40iw_locate_mpa - return pointer to mpa in the pkt
4281  * @pkt: packet with data
4282  */
4283 static u8 *i40iw_locate_mpa(u8 *pkt)
4284 {
4285         /* skip over ethernet header */
4286         pkt += I40IW_MAC_HLEN;
4287
4288         /* Skip over IP and TCP headers */
4289         pkt += 4 * (pkt[0] & 0x0f);
4290         pkt += 4 * ((pkt[12] >> 4) & 0x0f);
4291         return pkt;
4292 }
4293
4294 /**
4295  * i40iw_setup_termhdr - termhdr for terminate pkt
4296  * @qp: sc qp ptr for pkt
4297  * @hdr: term hdr
4298  * @opcode: flush opcode for termhdr
4299  * @layer_etype: error layer + error type
4300  * @err: error cod ein the header
4301  */
4302 static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
4303                                 struct i40iw_terminate_hdr *hdr,
4304                                 enum i40iw_flush_opcode opcode,
4305                                 u8 layer_etype,
4306                                 u8 err)
4307 {
4308         qp->flush_code = opcode;
4309         hdr->layer_etype = layer_etype;
4310         hdr->error_code = err;
4311 }
4312
/**
 * i40iw_bld_terminate_hdr - build terminate message header
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 *
 * Builds the terminate header at the start of q2_buf: parses the
 * offending packet (when present) to decide which DDP/RDMA headers to
 * echo back, maps the AE id to a flush code plus the terminate
 * layer-etype/error-code pair, then appends the copied headers.
 * Returns the total terminate message length in bytes.
 */
static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
				   struct i40iw_aeqe_info *info)
{
	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
	u16 ddp_seg_len;
	int copy_len = 0;
	u8 is_tagged = 0;
	u32 opcode;
	struct i40iw_terminate_hdr *termhdr;

	termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
	memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);

	if (info->q2_data_written) {
		/* Use data from offending packet to fill in ddp & rdma hdrs */
		pkt = i40iw_locate_mpa(pkt);
		ddp_seg_len = ntohs(*(__be16 *)pkt);
		if (ddp_seg_len) {
			copy_len = 2;
			termhdr->hdrct = DDP_LEN_FLAG;
			/* top bit of the DDP control byte selects tagged vs untagged */
			if (pkt[2] & 0x80) {
				is_tagged = 1;
				if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
					copy_len += TERM_DDP_LEN_TAGGED;
					termhdr->hdrct |= DDP_HDR_FLAG;
				}
			} else {
				if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
					copy_len += TERM_DDP_LEN_UNTAGGED;
					termhdr->hdrct |= DDP_HDR_FLAG;
				}

				/* also echo the RDMA header for read requests */
				if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
					if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
						copy_len += TERM_RDMA_LEN;
						termhdr->hdrct |= RDMA_HDR_FLAG;
					}
				}
			}
		}
	}

	opcode = i40iw_iwarp_opcode(info, pkt);

	/* map the AE id to flush code + terminate layer/etype/error */
	switch (info->ae_id) {
	case I40IW_AE_AMP_UNALLOCATED_STAG:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
		break;
	case I40IW_AE_AMP_BOUNDS_VIOLATION:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		if (info->q2_data_written)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
		break;
	case I40IW_AE_AMP_BAD_PD:
		switch (opcode) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
			break;
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND_SOL_INV:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
			break;
		default:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
		}
		break;
	case I40IW_AE_AMP_INVALID_STAG:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
		break;
	case I40IW_AE_AMP_BAD_QP:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
		break;
	case I40IW_AE_AMP_BAD_STAG_KEY:
	case I40IW_AE_AMP_BAD_STAG_INDEX:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		switch (opcode) {
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND_SOL_INV:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
			break;
		default:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
		}
		break;
	case I40IW_AE_AMP_RIGHTS_VIOLATION:
	case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
	case I40IW_AE_PRIV_OPERATION_DENIED:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
		break;
	case I40IW_AE_AMP_TO_WRAP:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
		break;
	case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
		break;
	case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
	case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
		break;
	case I40IW_AE_LCE_QP_CATASTROPHIC:
	case I40IW_AE_DDP_NO_L_BIT:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
		break;
	case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
		break;
	case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
		break;
	case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
		if (is_tagged)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
					    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
		break;
	case I40IW_AE_DDP_UBE_INVALID_MO:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
		break;
	case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
		break;
	case I40IW_AE_DDP_UBE_INVALID_QN:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
		break;
	case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
		break;
	case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
		break;
	default:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
		break;
	}

	/* append the echoed packet headers right after the terminate header */
	if (copy_len)
		memcpy(termhdr + 1, pkt, copy_len);

	return sizeof(struct i40iw_terminate_hdr) + copy_len;
}
4494
4495 /**
4496  * i40iw_terminate_send_fin() - Send fin for terminate message
4497  * @qp: qp associated with received terminate AE
4498  */
4499 void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
4500 {
4501         /* Send the fin only */
4502         i40iw_term_modify_qp(qp,
4503                              I40IW_QP_STATE_TERMINATE,
4504                              I40IWQP_TERM_SEND_FIN_ONLY,
4505                              0);
4506 }
4507
/**
 * i40iw_terminate_connection() - Bad AE and send terminate to remote QP
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 *
 * Builds a terminate header for the AE, arms the terminate timer,
 * marks the QP as having sent a terminate, and issues the modify-QP
 * that transmits the TERMINATE message.
 */
void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
	u8 termlen = 0;

	/* Only one terminate may ever be sent per QP */
	if (qp->term_flags & I40IW_TERM_SENT)
		return;		/* Sanity check */

	/* Eventtype can change from bld_terminate_hdr */
	qp->eventtype = TERM_EVENT_QP_FATAL;
	termlen = i40iw_bld_terminate_hdr(qp, info);
	i40iw_terminate_start_timer(qp);
	qp->term_flags |= I40IW_TERM_SENT;
	i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
			     I40IWQP_TERM_SEND_TERM_ONLY, termlen);
}
4528
/**
 * i40iw_terminate_received - handle terminate received AE
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 *
 * If hardware deposited the bad frame in Q2 without validating it,
 * validate the DDP/RDMAP control fields here and, on failure, send
 * our own terminate back.  Otherwise record the received terminate
 * and either complete immediately or wait for the peer's FIN.
 */
void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
	__be32 *mpa;
	u8 ddp_ctl;
	u8 rdma_ctl;
	u16 aeq_id = 0;
	struct i40iw_terminate_hdr *termhdr;

	mpa = (__be32 *)i40iw_locate_mpa(pkt);
	if (info->q2_data_written) {
		/* did not validate the frame - do it now */
		/* NOTE(review): control bytes live in the first MPA word;
		 * masks/offsets below appear to follow the DDP/RDMAP wire
		 * layout (RFC 5041/5040) — confirm against the spec.
		 */
		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
		rdma_ctl = ntohl(mpa[0]) & 0xff;
		if ((ddp_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
		else if ((ddp_ctl & 0x03) != 1)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
		else if (ntohl(mpa[2]) != 2)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
		else if (ntohl(mpa[3]) != 1)
			aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
		else if (ntohl(mpa[4]) != 0)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
		else if ((rdma_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;

		info->ae_id = aeq_id;
		if (info->ae_id) {
			/* Bad terminate recvd - send back a terminate */
			i40iw_terminate_connection(qp, info);
			return;
		}
	}

	qp->term_flags |= I40IW_TERM_RCVD;
	qp->eventtype = TERM_EVENT_QP_FATAL;
	/* terminate header follows the five MPA/DDP/RDMAP header words */
	termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
		i40iw_terminate_done(qp, 0);
	} else {
		/* wait for the peer's FIN before completing */
		i40iw_terminate_start_timer(qp);
		i40iw_terminate_send_fin(qp);
	}
}
4580
4581 /**
4582  * i40iw_sc_vsi_init - Initialize virtual device
4583  * @vsi: pointer to the vsi structure
4584  * @info: parameters to initialize vsi
4585  **/
4586 void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
4587 {
4588         int i;
4589
4590         vsi->dev = info->dev;
4591         vsi->back_vsi = info->back_vsi;
4592         vsi->mtu = info->params->mtu;
4593         vsi->exception_lan_queue = info->exception_lan_queue;
4594         i40iw_fill_qos_list(info->params->qs_handle_list);
4595
4596         for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
4597                 vsi->qos[i].qs_handle = info->params->qs_handle_list[i];
4598                 i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i,
4599                             vsi->qos[i].qs_handle);
4600                 spin_lock_init(&vsi->qos[i].lock);
4601                 INIT_LIST_HEAD(&vsi->qos[i].qplist);
4602         }
4603 }
4604
4605 /**
4606  * i40iw_hw_stats_init - Initiliaze HW stats table
4607  * @stats: pestat struct
4608  * @fcn_idx: PCI fn id
4609  * @is_pf: Is it a PF?
4610  *
4611  * Populate the HW stats table with register offset addr for each
4612  * stats. And start the perioidic stats timer.
4613  */
4614 void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
4615 {
4616         u32 stats_reg_offset;
4617         u32 stats_index;
4618         struct i40iw_dev_hw_stats_offsets *stats_table =
4619                 &stats->hw_stats_offsets;
4620         struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4621
4622         if (is_pf) {
4623                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4624                                 I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
4625                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4626                                 I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
4627                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4628                                 I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
4629                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4630                                 I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
4631                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4632                                 I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
4633                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4634                                 I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
4635                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4636                                 I40E_GLPES_PFTCPRTXSEG(fcn_idx);
4637                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4638                                 I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
4639                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4640                                 I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
4641
4642                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4643                                 I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
4644                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4645                                 I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
4646                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4647                                 I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
4648                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4649                                 I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
4650                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4651                                 I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
4652                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4653                                 I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
4654                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4655                                 I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
4656                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4657                                 I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
4658                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4659                                 I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
4660                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4661                                 I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
4662                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4663                                 I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
4664                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4665                                 I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
4666                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4667                                 I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
4668                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4669                                 I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
4670                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4671                                 I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
4672                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4673                                 I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
4674                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4675                                 I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
4676                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4677                                 I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
4678                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4679                                 I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
4680                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4681                                 I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
4682                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4683                                 I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
4684                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4685                                 I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
4686                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4687                                 I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
4688                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4689                                 I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
4690                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4691                                 I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
4692                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4693                                 I40E_GLPES_PFRDMAVINVLO(fcn_idx);
4694         } else {
4695                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4696                                 I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
4697                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4698                                 I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
4699                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4700                                 I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
4701                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4702                                 I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
4703                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4704                                 I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
4705                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4706                                 I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
4707                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4708                                 I40E_GLPES_VFTCPRTXSEG(fcn_idx);
4709                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4710                                 I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
4711                 stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4712                                 I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
4713
4714                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4715                                 I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
4716                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4717                                 I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
4718                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4719                                 I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
4720                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4721                                 I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
4722                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4723                                 I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
4724                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4725                                 I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
4726                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4727                                 I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
4728                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4729                                 I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
4730                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4731                                 I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
4732                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4733                                 I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
4734                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4735                                 I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
4736                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4737                                 I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
4738                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4739                                 I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
4740                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4741                                 I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
4742                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4743                                 I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
4744                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4745                                 I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
4746                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4747                                 I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
4748                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4749                                 I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
4750                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4751                                 I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
4752                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4753                                 I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
4754                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4755                                 I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
4756                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4757                                 I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
4758                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4759                                 I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
4760                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4761                                 I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
4762                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4763                                 I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
4764                 stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4765                                 I40E_GLPES_VFRDMAVINVLO(fcn_idx);
4766         }
4767
4768         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4769              stats_index++) {
4770                 stats_reg_offset = stats_table->stats_offset_64[stats_index];
4771                 last_rd_stats->stats_value_64[stats_index] =
4772                         readq(stats->hw->hw_addr + stats_reg_offset);
4773         }
4774
4775         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4776              stats_index++) {
4777                 stats_reg_offset = stats_table->stats_offset_32[stats_index];
4778                 last_rd_stats->stats_value_32[stats_index] =
4779                         i40iw_rd32(stats->hw, stats_reg_offset);
4780         }
4781 }
4782
4783 /**
4784  * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs.
4785  * @stat: pestat struct
4786  * @index: index in HW stats table which contains offset reg-addr
4787  * @value: hw stats value
4788  */
4789 void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
4790                             enum i40iw_hw_stats_index_32b index,
4791                             u64 *value)
4792 {
4793         struct i40iw_dev_hw_stats_offsets *stats_table =
4794                 &stats->hw_stats_offsets;
4795         struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4796         struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
4797         u64 new_stats_value = 0;
4798         u32 stats_reg_offset = stats_table->stats_offset_32[index];
4799
4800         new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
4801         /*roll-over case */
4802         if (new_stats_value < last_rd_stats->stats_value_32[index])
4803                 hw_stats->stats_value_32[index] += new_stats_value;
4804         else
4805                 hw_stats->stats_value_32[index] +=
4806                         new_stats_value - last_rd_stats->stats_value_32[index];
4807         last_rd_stats->stats_value_32[index] = new_stats_value;
4808         *value = hw_stats->stats_value_32[index];
4809 }
4810
4811 /**
4812  * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit) and accommodates for roll-overs.
4813  * @stats: pestat struct
4814  * @index: index in HW stats table which contains offset reg-addr
4815  * @value: hw stats value
4816  */
4817 void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
4818                             enum i40iw_hw_stats_index_64b index,
4819                             u64 *value)
4820 {
4821         struct i40iw_dev_hw_stats_offsets *stats_table =
4822                 &stats->hw_stats_offsets;
4823         struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4824         struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
4825         u64 new_stats_value = 0;
4826         u32 stats_reg_offset = stats_table->stats_offset_64[index];
4827
4828         new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
4829         /*roll-over case */
4830         if (new_stats_value < last_rd_stats->stats_value_64[index])
4831                 hw_stats->stats_value_64[index] += new_stats_value;
4832         else
4833                 hw_stats->stats_value_64[index] +=
4834                         new_stats_value - last_rd_stats->stats_value_64[index];
4835         last_rd_stats->stats_value_64[index] = new_stats_value;
4836         *value = hw_stats->stats_value_64[index];
4837 }
4838
4839 /**
4840  * i40iw_hw_stats_read_all - read all HW stat counters
4841  * @stats: pestat struct
4842  * @stats_values: hw stats structure
4843  *
4844  * Read all the HW stat counters and populates hw_stats structure
4845  * of passed-in vsi's pestat as well as copy created in stat_values.
4846  */
4847 void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
4848                              struct i40iw_dev_hw_stats *stats_values)
4849 {
4850         u32 stats_index;
4851         unsigned long flags;
4852
4853         spin_lock_irqsave(&stats->lock, flags);
4854
4855         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4856              stats_index++)
4857                 i40iw_hw_stats_read_32(stats, stats_index,
4858                                        &stats_values->stats_value_32[stats_index]);
4859         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4860              stats_index++)
4861                 i40iw_hw_stats_read_64(stats, stats_index,
4862                                        &stats_values->stats_value_64[stats_index]);
4863         spin_unlock_irqrestore(&stats->lock, flags);
4864 }
4865
4866 /**
4867  * i40iw_hw_stats_refresh_all - Update all HW stats structs
4868  * @stats: pestat struct
4869  *
4870  * Read all the HW stats counters to refresh values in hw_stats structure
4871  * of passed-in dev's pestat
4872  */
4873 void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
4874 {
4875         u64 stats_value;
4876         u32 stats_index;
4877         unsigned long flags;
4878
4879         spin_lock_irqsave(&stats->lock, flags);
4880
4881         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4882              stats_index++)
4883                 i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
4884         for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4885              stats_index++)
4886                 i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
4887         spin_unlock_irqrestore(&stats->lock, flags);
4888 }
4889
4890 /**
4891  * i40iw_get_fcn_id - Return the function id
4892  * @dev: pointer to the device
4893  */
4894 static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
4895 {
4896         u8 fcn_id = I40IW_INVALID_FCN_ID;
4897         u8 i;
4898
4899         for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
4900                 if (!dev->fcn_id_array[i]) {
4901                         fcn_id = i;
4902                         dev->fcn_id_array[i] = true;
4903                         break;
4904                 }
4905         return fcn_id;
4906 }
4907
4908 /**
4909  * i40iw_vsi_stats_init - Initialize the vsi statistics
4910  * @vsi: pointer to the vsi structure
4911  * @info: The info structure used for initialization
4912  */
4913 enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
4914 {
4915         u8 fcn_id = info->fcn_id;
4916
4917         if (info->alloc_fcn_id)
4918                 fcn_id = i40iw_get_fcn_id(vsi->dev);
4919
4920         if (fcn_id == I40IW_INVALID_FCN_ID)
4921                 return I40IW_ERR_NOT_READY;
4922
4923         vsi->pestat = info->pestat;
4924         vsi->pestat->hw = vsi->dev->hw;
4925         vsi->pestat->vsi = vsi;
4926
4927         if (info->stats_initialize) {
4928                 i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
4929                 spin_lock_init(&vsi->pestat->lock);
4930                 i40iw_hw_stats_start_timer(vsi);
4931         }
4932         vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
4933         vsi->fcn_id = fcn_id;
4934         return I40IW_SUCCESS;
4935 }
4936
4937 /**
4938  * i40iw_vsi_stats_free - Free the vsi stats
4939  * @vsi: pointer to the vsi structure
4940  */
4941 void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
4942 {
4943         u8 fcn_id = vsi->fcn_id;
4944
4945         if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
4946                 vsi->dev->fcn_id_array[fcn_id] = false;
4947         i40iw_hw_stats_stop_timer(vsi);
4948 }
4949
/* control QP (CQP) lifecycle and work submission ops */
static struct i40iw_cqp_ops iw_cqp_ops = {
	.cqp_init = i40iw_sc_cqp_init,
	.cqp_create = i40iw_sc_cqp_create,
	.cqp_post_sq = i40iw_sc_cqp_post_sq,
	.cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,
	.cqp_destroy = i40iw_sc_cqp_destroy,
	.poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
};

/* CQP completion queue (CCQ) ops */
static struct i40iw_ccq_ops iw_ccq_ops = {
	.ccq_init = i40iw_sc_ccq_init,
	.ccq_create = i40iw_sc_ccq_create,
	.ccq_destroy = i40iw_sc_ccq_destroy,
	.ccq_create_done = i40iw_sc_ccq_create_done,
	.ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,
	.ccq_arm = i40iw_sc_ccq_arm
};

/* completion event queue (CEQ) ops */
static struct i40iw_ceq_ops iw_ceq_ops = {
	.ceq_init = i40iw_sc_ceq_init,
	.ceq_create = i40iw_sc_ceq_create,
	.cceq_create_done = i40iw_sc_cceq_create_done,
	.cceq_destroy_done = i40iw_sc_cceq_destroy_done,
	.cceq_create = i40iw_sc_cceq_create,
	.ceq_destroy = i40iw_sc_ceq_destroy,
	.process_ceq = i40iw_sc_process_ceq
};

/* asynchronous event queue (AEQ) ops */
static struct i40iw_aeq_ops iw_aeq_ops = {
	.aeq_init = i40iw_sc_aeq_init,
	.aeq_create = i40iw_sc_aeq_create,
	.aeq_destroy = i40iw_sc_aeq_destroy,
	.get_next_aeqe = i40iw_sc_get_next_aeqe,
	.repost_aeq_entries = i40iw_sc_repost_aeq_entries,
	.aeq_create_done = i40iw_sc_aeq_create_done,
	.aeq_destroy_done = i40iw_sc_aeq_destroy_done
};

/* iwarp pd ops */
static struct i40iw_pd_ops iw_pd_ops = {
	.pd_init = i40iw_sc_pd_init,
};

/* privileged QP ops */
static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
	.qp_init = i40iw_sc_qp_init,
	.qp_create = i40iw_sc_qp_create,
	.qp_modify = i40iw_sc_qp_modify,
	.qp_destroy = i40iw_sc_qp_destroy,
	.qp_flush_wqes = i40iw_sc_qp_flush_wqes,
	.qp_upload_context = i40iw_sc_qp_upload_context,
	.qp_setctx = i40iw_sc_qp_setctx,
	.qp_send_lsmm = i40iw_sc_send_lsmm,
	.qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
	.qp_send_rtt = i40iw_sc_send_rtt,
	.qp_post_wqe0 = i40iw_sc_post_wqe0,
	.iw_mr_fast_register = i40iw_sc_mr_fast_register
};

/* privileged CQ ops */
static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
	.cq_init = i40iw_sc_cq_init,
	.cq_create = i40iw_sc_cq_create,
	.cq_destroy = i40iw_sc_cq_destroy,
	.cq_modify = i40iw_sc_cq_modify,
};

/* memory region / memory window ops */
static struct i40iw_mr_ops iw_mr_ops = {
	.alloc_stag = i40iw_sc_alloc_stag,
	.mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
	.mr_reg_shared = i40iw_sc_mr_reg_shared,
	.dealloc_stag = i40iw_sc_dealloc_stag,
	.query_stag = i40iw_sc_query_stag,
	.mw_alloc = i40iw_sc_mw_alloc
};

/* miscellaneous CQP commands (ARP cache, APBVT, qhash, HMC, MAC/IP table) */
static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
	.manage_push_page = i40iw_sc_manage_push_page,
	.manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
	.set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
	.commit_fpm_values = i40iw_sc_commit_fpm_values,
	.query_fpm_values = i40iw_sc_query_fpm_values,
	.static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,
	.add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,
	.del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,
	.query_arp_cache_entry = i40iw_sc_query_arp_cache_entry,
	.manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,
	.manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,
	.alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,
	.add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,
	.del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,
	.cqp_nop = i40iw_sc_cqp_nop,
	.commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,
	.query_fpm_values_done = i40iw_sc_query_fpm_values_done,
	.manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,
	.update_suspend_qp = i40iw_sc_suspend_qp,
	.update_resume_qp = i40iw_sc_resume_qp
};

/* host memory cache (HMC) object ops */
static struct i40iw_hmc_ops iw_hmc_ops = {
	.init_iw_hmc = i40iw_sc_init_iw_hmc,
	.parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
	.configure_iw_fpm = i40iw_sc_configure_iw_fpm,
	.parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,
	.create_hmc_object = i40iw_sc_create_hmc_obj,
	.del_hmc_object = i40iw_sc_del_hmc_obj
};
5055
5056 /**
5057  * i40iw_device_init - Initialize IWARP device
5058  * @dev: IWARP device pointer
5059  * @info: IWARP init info
5060  */
5061 enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
5062                                          struct i40iw_device_init_info *info)
5063 {
5064         u32 val;
5065         u32 vchnl_ver = 0;
5066         u16 hmc_fcn = 0;
5067         enum i40iw_status_code ret_code = 0;
5068         u8 db_size;
5069
5070         spin_lock_init(&dev->cqp_lock);
5071
5072         i40iw_device_init_uk(&dev->dev_uk);
5073
5074         dev->debug_mask = info->debug_mask;
5075
5076         dev->hmc_fn_id = info->hmc_fn_id;
5077         dev->is_pf = info->is_pf;
5078
5079         dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
5080         dev->fpm_query_buf = info->fpm_query_buf;
5081
5082         dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
5083         dev->fpm_commit_buf = info->fpm_commit_buf;
5084
5085         dev->hw = info->hw;
5086         dev->hw->hw_addr = info->bar0;
5087
5088         if (dev->is_pf) {
5089                 val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
5090                 dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
5091
5092                 val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
5093                 db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
5094                 if ((db_size != I40IW_PE_DB_SIZE_4M) &&
5095                     (db_size != I40IW_PE_DB_SIZE_8M)) {
5096                         i40iw_debug(dev, I40IW_DEBUG_DEV,
5097                                     "%s: PE doorbell is not enabled in CSR val 0x%x\n",
5098                                     __func__, val);
5099                         ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
5100                         return ret_code;
5101                 }
5102                 dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
5103                 dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
5104         } else {
5105                 dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
5106         }
5107
5108         dev->cqp_ops = &iw_cqp_ops;
5109         dev->ccq_ops = &iw_ccq_ops;
5110         dev->ceq_ops = &iw_ceq_ops;
5111         dev->aeq_ops = &iw_aeq_ops;
5112         dev->cqp_misc_ops = &iw_cqp_misc_ops;
5113         dev->iw_pd_ops = &iw_pd_ops;
5114         dev->iw_priv_qp_ops = &iw_priv_qp_ops;
5115         dev->iw_priv_cq_ops = &iw_priv_cq_ops;
5116         dev->mr_ops = &iw_mr_ops;
5117         dev->hmc_ops = &iw_hmc_ops;
5118         dev->vchnl_if.vchnl_send = info->vchnl_send;
5119         if (dev->vchnl_if.vchnl_send)
5120                 dev->vchnl_up = true;
5121         else
5122                 dev->vchnl_up = false;
5123         if (!dev->is_pf) {
5124                 dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
5125                 ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
5126                 if (!ret_code) {
5127                         i40iw_debug(dev, I40IW_DEBUG_DEV,
5128                                     "%s: Get Channel version rc = 0x%0x, version is %u\n",
5129                                 __func__, ret_code, vchnl_ver);
5130                         ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
5131                         if (!ret_code) {
5132                                 i40iw_debug(dev, I40IW_DEBUG_DEV,
5133                                             "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
5134                                             __func__, ret_code, hmc_fcn);
5135                                 dev->hmc_fn_id = (u8)hmc_fcn;
5136                         }
5137                 }
5138         }
5139         dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;
5140
5141         return ret_code;
5142 }