drivers/infiniband/hw/qedr/verbs.c
1 /* QLogic qedr NIC Driver
2  * Copyright (c) 2015-2016  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
34 #include <net/ip.h>
35 #include <net/ipv6.h>
36 #include <net/udp.h>
37 #include <linux/iommu.h>
38
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
45 #include <rdma/uverbs_ioctl.h>
46
47 #include <linux/qed/common_hsi.h>
48 #include "qedr_hsi_rdma.h"
49 #include <linux/qed/qed_if.h>
50 #include "qedr.h"
51 #include "verbs.h"
52 #include <rdma/qedr-abi.h>
53 #include "qedr_roce_cm.h"
54 #include "qedr_iw_cm.h"
55
56 #define QEDR_SRQ_WQE_ELEM_SIZE  sizeof(union rdma_srq_elm)
57 #define RDMA_MAX_SGE_PER_SRQ    (4)
58 #define RDMA_MAX_SRQ_WQE_SIZE   (RDMA_MAX_SGE_PER_SRQ + 1)
59
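/* Convert a PWM doorbell offset constant into a byte offset within the
 * doorbell address space; user space adds its DPI base and the kernel adds
 * dev->db_addr on top of this value.
 */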
60 #define DB_ADDR_SHIFT(addr)             ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
61
62 enum {
63         QEDR_USER_MMAP_IO_WC = 0,
64         QEDR_USER_MMAP_PHYS_PAGE,
65 };
66
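/* Copy a response structure to user space, clamped to the buffer size the
 * caller supplied (udata->outlen), so user libraries with shorter response
 * structs are still served.
 */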
67 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
68                                         size_t len)
69 {
70         size_t min_len = min_t(size_t, len, udata->outlen);
71
72         return ib_copy_to_udata(udata, src, min_len);
73 }
74
75 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
76 {
77         if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
78                 return -EINVAL;
79
80         *pkey = QEDR_ROCE_PKEY_DEFAULT;
81         return 0;
82 }
83
84 int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
85                       int index, union ib_gid *sgid)
86 {
87         struct qedr_dev *dev = get_qedr_dev(ibdev);
88
89         memset(sgid->raw, 0, sizeof(sgid->raw));
90         ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
91
92         DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
93                  sgid->global.interface_id, sgid->global.subnet_prefix);
94
95         return 0;
96 }
97
98 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
99 {
100         struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
101         struct qedr_device_attr *qattr = &dev->attr;
102         struct qedr_srq *srq = get_qedr_srq(ibsrq);
103
104         srq_attr->srq_limit = srq->srq_limit;
105         srq_attr->max_wr = qattr->max_srq_wr;
106         srq_attr->max_sge = qattr->max_sge;
107
108         return 0;
109 }
110
111 int qedr_query_device(struct ib_device *ibdev,
112                       struct ib_device_attr *attr, struct ib_udata *udata)
113 {
114         struct qedr_dev *dev = get_qedr_dev(ibdev);
115         struct qedr_device_attr *qattr = &dev->attr;
116
117         if (!dev->rdma_ctx) {
118                 DP_ERR(dev,
119                        "qedr_query_device called with invalid params rdma_ctx=%p\n",
120                        dev->rdma_ctx);
121                 return -EINVAL;
122         }
123
124         memset(attr, 0, sizeof(*attr));
125
126         attr->fw_ver = qattr->fw_ver;
127         attr->sys_image_guid = qattr->sys_image_guid;
128         attr->max_mr_size = qattr->max_mr_size;
129         attr->page_size_cap = qattr->page_size_caps;
130         attr->vendor_id = qattr->vendor_id;
131         attr->vendor_part_id = qattr->vendor_part_id;
132         attr->hw_ver = qattr->hw_ver;
133         attr->max_qp = qattr->max_qp;
134         attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
135         attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
136             IB_DEVICE_RC_RNR_NAK_GEN |
137             IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
138
139         attr->max_send_sge = qattr->max_sge;
140         attr->max_recv_sge = qattr->max_sge;
141         attr->max_sge_rd = qattr->max_sge;
142         attr->max_cq = qattr->max_cq;
143         attr->max_cqe = qattr->max_cqe;
144         attr->max_mr = qattr->max_mr;
145         attr->max_mw = qattr->max_mw;
146         attr->max_pd = qattr->max_pd;
147         attr->atomic_cap = dev->atomic_cap;
148         attr->max_qp_init_rd_atom =
149             1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
150         attr->max_qp_rd_atom =
151             min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
152                 attr->max_qp_init_rd_atom);
153
154         attr->max_srq = qattr->max_srq;
155         attr->max_srq_sge = qattr->max_srq_sge;
156         attr->max_srq_wr = qattr->max_srq_wr;
157
158         attr->local_ca_ack_delay = qattr->dev_ack_delay;
159         attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
160         attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
161         attr->max_ah = qattr->max_ah;
162
163         return 0;
164 }
165
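/* Map an Ethernet link speed (in Mbps) onto an InfiniBand speed/width pair
 * of roughly the same throughput, e.g. 100G is reported as EDR (25G) x4.
 */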
166 static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
167                                             u8 *ib_width)
168 {
169         switch (speed) {
170         case 1000:
171                 *ib_speed = IB_SPEED_SDR;
172                 *ib_width = IB_WIDTH_1X;
173                 break;
174         case 10000:
175                 *ib_speed = IB_SPEED_QDR;
176                 *ib_width = IB_WIDTH_1X;
177                 break;
178
179         case 20000:
180                 *ib_speed = IB_SPEED_DDR;
181                 *ib_width = IB_WIDTH_4X;
182                 break;
183
184         case 25000:
185                 *ib_speed = IB_SPEED_EDR;
186                 *ib_width = IB_WIDTH_1X;
187                 break;
188
189         case 40000:
190                 *ib_speed = IB_SPEED_QDR;
191                 *ib_width = IB_WIDTH_4X;
192                 break;
193
194         case 50000:
195                 *ib_speed = IB_SPEED_HDR;
196                 *ib_width = IB_WIDTH_1X;
197                 break;
198
199         case 100000:
200                 *ib_speed = IB_SPEED_EDR;
201                 *ib_width = IB_WIDTH_4X;
202                 break;
203
204         default:
205                 /* Unsupported */
206                 *ib_speed = IB_SPEED_SDR;
207                 *ib_width = IB_WIDTH_1X;
208         }
209 }
210
211 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
212 {
213         struct qedr_dev *dev;
214         struct qed_rdma_port *rdma_port;
215
216         dev = get_qedr_dev(ibdev);
217
218         if (!dev->rdma_ctx) {
219                 DP_ERR(dev, "rdma_ctx is NULL\n");
220                 return -EINVAL;
221         }
222
223         rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
224
225         /* *attr is zeroed by the caller, so avoid zeroing it here */
226         if (rdma_port->port_state == QED_RDMA_PORT_UP) {
227                 attr->state = IB_PORT_ACTIVE;
228                 attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
229         } else {
230                 attr->state = IB_PORT_DOWN;
231                 attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
232         }
233         attr->max_mtu = IB_MTU_4096;
234         attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
235         attr->lid = 0;
236         attr->lmc = 0;
237         attr->sm_lid = 0;
238         attr->sm_sl = 0;
239         attr->ip_gids = true;
240         if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
241                 attr->gid_tbl_len = 1;
242                 attr->pkey_tbl_len = 1;
243         } else {
244                 attr->gid_tbl_len = QEDR_MAX_SGID;
245                 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
246         }
247         attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
248         attr->qkey_viol_cntr = 0;
249         get_link_speed_and_width(rdma_port->link_speed,
250                                  &attr->active_speed, &attr->active_width);
251         attr->max_msg_sz = rdma_port->max_msg_size;
252         attr->max_vl_num = 4;
253
254         return 0;
255 }
256
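/* Allocate a per-process verbs context: reserve a DPI (a doorbell window)
 * for the application, publish it through an rdma_user_mmap entry so user
 * space can map it, and report the device limits in the response.
 */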
257 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
258 {
259         struct ib_device *ibdev = uctx->device;
260         int rc;
261         struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
262         struct qedr_alloc_ucontext_resp uresp = {};
263         struct qedr_alloc_ucontext_req ureq = {};
264         struct qedr_dev *dev = get_qedr_dev(ibdev);
265         struct qed_rdma_add_user_out_params oparams;
266         struct qedr_user_mmap_entry *entry;
267
268         if (!udata)
269                 return -EFAULT;
270
271         if (udata->inlen) {
272                 rc = ib_copy_from_udata(&ureq, udata,
273                                         min(sizeof(ureq), udata->inlen));
274                 if (rc) {
275                         DP_ERR(dev, "Problem copying data from user space\n");
276                         return -EFAULT;
277                 }
278
279                 ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
280         }
281
282         rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
283         if (rc) {
284                 DP_ERR(dev,
285                        "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this, consider increasing the number of DPIs, increasing the doorbell BAR size or closing unnecessary RoCE applications. To increase the number of DPIs, consult the qedr readme\n",
286                        rc);
287                 return rc;
288         }
289
290         ctx->dpi = oparams.dpi;
291         ctx->dpi_addr = oparams.dpi_addr;
292         ctx->dpi_phys_addr = oparams.dpi_phys_addr;
293         ctx->dpi_size = oparams.dpi_size;
294         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
295         if (!entry) {
296                 rc = -ENOMEM;
297                 goto err;
298         }
299
300         entry->io_address = ctx->dpi_phys_addr;
301         entry->length = ctx->dpi_size;
302         entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
303         entry->dpi = ctx->dpi;
304         entry->dev = dev;
305         rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
306                                          ctx->dpi_size);
307         if (rc) {
308                 kfree(entry);
309                 goto err;
310         }
311         ctx->db_mmap_entry = &entry->rdma_entry;
312
313         if (!dev->user_dpm_enabled)
314                 uresp.dpm_flags = 0;
315         else if (rdma_protocol_iwarp(&dev->ibdev, 1))
316                 uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
317         else
318                 uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
319                                   QEDR_DPM_TYPE_ROCE_LEGACY;
320
321         uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
322         uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
323         uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
324
325         uresp.wids_enabled = 1;
326         uresp.wid_count = oparams.wid_count;
327         uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
328         uresp.db_size = ctx->dpi_size;
329         uresp.max_send_wr = dev->attr.max_sqe;
330         uresp.max_recv_wr = dev->attr.max_rqe;
331         uresp.max_srq_wr = dev->attr.max_srq_wr;
332         uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
333         uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
334         uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
335         uresp.max_cqes = QEDR_MAX_CQES;
336
337         rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
338         if (rc)
339                 goto err;
340
341         ctx->dev = dev;
342
343         DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
344                  &ctx->ibucontext);
345         return 0;
346
347 err:
348         if (!ctx->db_mmap_entry)
349                 dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
350         else
351                 rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
352
353         return rc;
354 }
355
356 void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
357 {
358         struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
359
360         DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
361                  uctx);
362
363         rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
364 }
365
366 void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
367 {
368         struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
369         struct qedr_dev *dev = entry->dev;
370
371         if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
372                 free_page((unsigned long)entry->address);
373         else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
374                 dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
375
376         kfree(entry);
377 }
378
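/* mmap handler: vm_pgoff identifies an rdma_user_mmap_entry previously
 * inserted by the driver. Doorbell windows (QEDR_USER_MMAP_IO_WC) are mapped
 * write-combined; doorbell recovery pages (QEDR_USER_MMAP_PHYS_PAGE) are
 * ordinary kernel pages inserted into the user VMA.
 */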
379 int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
380 {
381         struct ib_device *dev = ucontext->device;
382         size_t length = vma->vm_end - vma->vm_start;
383         struct rdma_user_mmap_entry *rdma_entry;
384         struct qedr_user_mmap_entry *entry;
385         int rc = 0;
386         u64 pfn;
387
388         ibdev_dbg(dev,
389                   "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
390                   vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
391
392         rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
393         if (!rdma_entry) {
394                 ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
395                           vma->vm_pgoff);
396                 return -EINVAL;
397         }
398         entry = get_qedr_mmap_entry(rdma_entry);
399         ibdev_dbg(dev,
400                   "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
401                   entry->io_address, length, entry->mmap_flag);
402
403         switch (entry->mmap_flag) {
404         case QEDR_USER_MMAP_IO_WC:
405                 pfn = entry->io_address >> PAGE_SHIFT;
406                 rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
407                                        pgprot_writecombine(vma->vm_page_prot),
408                                        rdma_entry);
409                 break;
410         case QEDR_USER_MMAP_PHYS_PAGE:
411                 rc = vm_insert_page(vma, vma->vm_start,
412                                     virt_to_page(entry->address));
413                 break;
414         default:
415                 rc = -EINVAL;
416         }
417
418         if (rc)
419                 ibdev_dbg(dev,
420                           "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
421                           entry->io_address, length, entry->mmap_flag, rc);
422
423         rdma_user_mmap_entry_put(rdma_entry);
424         return rc;
425 }
426
427 int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
428 {
429         struct ib_device *ibdev = ibpd->device;
430         struct qedr_dev *dev = get_qedr_dev(ibdev);
431         struct qedr_pd *pd = get_qedr_pd(ibpd);
432         u16 pd_id;
433         int rc;
434
435         DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
436                  udata ? "User Lib" : "Kernel");
437
438         if (!dev->rdma_ctx) {
439                 DP_ERR(dev, "invalid RDMA context\n");
440                 return -EINVAL;
441         }
442
443         rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
444         if (rc)
445                 return rc;
446
447         pd->pd_id = pd_id;
448
449         if (udata) {
450                 struct qedr_alloc_pd_uresp uresp = {
451                         .pd_id = pd_id,
452                 };
453                 struct qedr_ucontext *context = rdma_udata_to_drv_context(
454                         udata, struct qedr_ucontext, ibucontext);
455
456                 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
457                 if (rc) {
458                         DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
459                         dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
460                         return rc;
461                 }
462
463                 pd->uctx = context;
464                 pd->uctx->pd = pd;
465         }
466
467         return 0;
468 }
469
470 void qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
471 {
472         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
473         struct qedr_pd *pd = get_qedr_pd(ibpd);
474
475         DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
476         dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
477 }
478
479 static void qedr_free_pbl(struct qedr_dev *dev,
480                           struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
481 {
482         struct pci_dev *pdev = dev->pdev;
483         int i;
484
485         for (i = 0; i < pbl_info->num_pbls; i++) {
486                 if (!pbl[i].va)
487                         continue;
488                 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
489                                   pbl[i].va, pbl[i].pa);
490         }
491
492         kfree(pbl);
493 }
494
495 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
496 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
497
498 #define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
499 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
500 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
501
502 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
503                                            struct qedr_pbl_info *pbl_info,
504                                            gfp_t flags)
505 {
506         struct pci_dev *pdev = dev->pdev;
507         struct qedr_pbl *pbl_table;
508         dma_addr_t *pbl_main_tbl;
509         dma_addr_t pa;
510         void *va;
511         int i;
512
513         pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
514         if (!pbl_table)
515                 return ERR_PTR(-ENOMEM);
516
517         for (i = 0; i < pbl_info->num_pbls; i++) {
518                 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
519                                         flags);
520                 if (!va)
521                         goto err;
522
523                 pbl_table[i].va = va;
524                 pbl_table[i].pa = pa;
525         }
526
527         /* Two-layer PBLs: if we have more than one PBL, initialize the first
528          * one with physical pointers to all of the rest.
529          */
530         pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
531         for (i = 0; i < pbl_info->num_pbls - 1; i++)
532                 pbl_main_tbl[i] = pbl_table[i + 1].pa;
533
534         return pbl_table;
535
536 err:
537         for (i--; i >= 0; i--)
538                 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
539                                   pbl_table[i].va, pbl_table[i].pa);
540
541         qedr_free_pbl(dev, pbl_info, pbl_table);
542
543         return ERR_PTR(-ENOMEM);
544 }
545
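/* Choose the PBL geometry for num_pbes page entries: a single PBL page when
 * everything fits, otherwise a two-layer layout in which the first page
 * holds pointers to the data PBL pages. The PBL page size is doubled until
 * the two-layer capacity covers num_pbes (already bounded by the
 * MAX_PBES_TWO_LAYER check).
 */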
546 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
547                                 struct qedr_pbl_info *pbl_info,
548                                 u32 num_pbes, int two_layer_capable)
549 {
550         u32 pbl_capacity;
551         u32 pbl_size;
552         u32 num_pbls;
553
554         if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
555                 if (num_pbes > MAX_PBES_TWO_LAYER) {
556                         DP_ERR(dev, "prepare pbl table: too many pages %d\n",
557                                num_pbes);
558                         return -EINVAL;
559                 }
560
561                 /* calculate required pbl page size */
562                 pbl_size = MIN_FW_PBL_PAGE_SIZE;
563                 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
564                                NUM_PBES_ON_PAGE(pbl_size);
565
566                 while (pbl_capacity < num_pbes) {
567                         pbl_size *= 2;
568                         pbl_capacity = pbl_size / sizeof(u64);
569                         pbl_capacity = pbl_capacity * pbl_capacity;
570                 }
571
572                 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
573                 num_pbls++;     /* One for layer 0 (points to the other PBLs) */
574                 pbl_info->two_layered = true;
575         } else {
576                 /* One layered PBL */
577                 num_pbls = 1;
578                 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
579                                  roundup_pow_of_two((num_pbes * sizeof(u64))));
580                 pbl_info->two_layered = false;
581         }
582
583         pbl_info->num_pbls = num_pbls;
584         pbl_info->pbl_size = pbl_size;
585         pbl_info->num_pbes = num_pbes;
586
587         DP_DEBUG(dev, QEDR_MSG_MR,
588                  "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
589                  pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
590
591         return 0;
592 }
593
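/* Walk the umem scatterlist and write one little-endian PBE per firmware
 * page. A firmware page may be smaller than the CPU page (pg_shift vs
 * PAGE_SHIFT), hence fw_pg_per_umem_pg entries per umem page; when a PBL
 * page fills up, continue in the next one.
 */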
594 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
595                                struct qedr_pbl *pbl,
596                                struct qedr_pbl_info *pbl_info, u32 pg_shift)
597 {
598         int pbe_cnt, total_num_pbes = 0;
599         u32 fw_pg_cnt, fw_pg_per_umem_pg;
600         struct qedr_pbl *pbl_tbl;
601         struct sg_dma_page_iter sg_iter;
602         struct regpair *pbe;
603         u64 pg_addr;
604
605         if (!pbl_info->num_pbes)
606                 return;
607
608         /* If we have a two-layered PBL, the first PBL points to the rest of
609          * the PBLs and the first data entry lies in the second PBL of the table.
610          */
611         if (pbl_info->two_layered)
612                 pbl_tbl = &pbl[1];
613         else
614                 pbl_tbl = pbl;
615
616         pbe = (struct regpair *)pbl_tbl->va;
617         if (!pbe) {
618                 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
619                 return;
620         }
621
622         pbe_cnt = 0;
623
624         fw_pg_per_umem_pg = BIT(PAGE_SHIFT - pg_shift);
625
626         for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
627                 pg_addr = sg_page_iter_dma_address(&sg_iter);
628                 for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
629                         pbe->lo = cpu_to_le32(pg_addr);
630                         pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
631
632                         pg_addr += BIT(pg_shift);
633                         pbe_cnt++;
634                         total_num_pbes++;
635                         pbe++;
636
637                         if (total_num_pbes == pbl_info->num_pbes)
638                                 return;
639
640                         /* If the given PBL is full of PBEs,
641                          * move to the next PBL.
642                          */
643                         if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
644                                 pbl_tbl++;
645                                 pbe = (struct regpair *)pbl_tbl->va;
646                                 pbe_cnt = 0;
647                         }
648
649                         fw_pg_cnt++;
650                 }
651         }
652 }
653
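/* Register a doorbell address and its shadow data with the qed doorbell
 * recovery mechanism. A NULL db_data means the user library predates
 * doorbell recovery, so the registration is skipped.
 */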
654 static int qedr_db_recovery_add(struct qedr_dev *dev,
655                                 void __iomem *db_addr,
656                                 void *db_data,
657                                 enum qed_db_rec_width db_width,
658                                 enum qed_db_rec_space db_space)
659 {
660         if (!db_data) {
661                 DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
662                 return 0;
663         }
664
665         return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
666                                                  db_width, db_space);
667 }
668
669 static void qedr_db_recovery_del(struct qedr_dev *dev,
670                                  void __iomem *db_addr,
671                                  void *db_data)
672 {
673         if (!db_data) {
674                 DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
675                 return;
676         }
677
678         /* Ignore the return code as there is not much we can do about it. An
679          * error log is printed inside the callee.
680          */
681         dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
682 }
683
684 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
685                               struct qedr_cq *cq, struct ib_udata *udata,
686                               u32 db_offset)
687 {
688         struct qedr_create_cq_uresp uresp;
689         int rc;
690
691         memset(&uresp, 0, sizeof(uresp));
692
693         uresp.db_offset = db_offset;
694         uresp.icid = cq->icid;
695         if (cq->q.db_mmap_entry)
696                 uresp.db_rec_addr =
697                         rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
698
699         rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
700         if (rc)
701                 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
702
703         return rc;
704 }
705
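/* Advance the latest-CQE pointer. Passing the last element of the chain
 * flips pbl_toggle, so valid CQEs can be told apart from stale ones on the
 * next pass through the ring.
 */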
706 static void consume_cqe(struct qedr_cq *cq)
707 {
708         if (cq->latest_cqe == cq->toggle_cqe)
709                 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
710
711         cq->latest_cqe = qed_chain_consume(&cq->pbl);
712 }
713
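/* Round the requested CQE count up so the ring spans whole pages; the extra
 * entry added here is used by the driver and not reported to the FW (see
 * cq_size = chain_entries - 1 in qedr_init_cq_params).
 */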
714 static inline int qedr_align_cq_entries(int entries)
715 {
716         u64 size, aligned_size;
717
718         /* We allocate an extra entry that we don't report to the FW. */
719         size = (entries + 1) * QEDR_CQE_SIZE;
720         aligned_size = ALIGN(size, PAGE_SIZE);
721
722         return aligned_size / QEDR_CQE_SIZE;
723 }
724
725 static int qedr_init_user_db_rec(struct ib_udata *udata,
726                                  struct qedr_dev *dev, struct qedr_userq *q,
727                                  bool requires_db_rec)
728 {
729         struct qedr_ucontext *uctx =
730                 rdma_udata_to_drv_context(udata, struct qedr_ucontext,
731                                           ibucontext);
732         struct qedr_user_mmap_entry *entry;
733         int rc;
734
735         /* Abort for a user queue with no doorbell (SRQ) or a non-supporting lib */
736         if (requires_db_rec == 0 || !uctx->db_rec)
737                 return 0;
738
739         /* Allocate a page for doorbell recovery, add to mmap */
740         q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
741         if (!q->db_rec_data) {
742                 DP_ERR(dev, "get_zeroed_page failed\n");
743                 return -ENOMEM;
744         }
745
746         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
747         if (!entry)
748                 goto err_free_db_data;
749
750         entry->address = q->db_rec_data;
751         entry->length = PAGE_SIZE;
752         entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
753         rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
754                                          &entry->rdma_entry,
755                                          PAGE_SIZE);
756         if (rc)
757                 goto err_free_entry;
758
759         q->db_mmap_entry = &entry->rdma_entry;
760
761         return 0;
762
763 err_free_entry:
764         kfree(entry);
765
766 err_free_db_data:
767         free_page((unsigned long)q->db_rec_data);
768         q->db_rec_data = NULL;
769         return -ENOMEM;
770 }
771
772 static inline int qedr_init_user_queue(struct ib_udata *udata,
773                                        struct qedr_dev *dev,
774                                        struct qedr_userq *q, u64 buf_addr,
775                                        size_t buf_len, bool requires_db_rec,
776                                        int access,
777                                        int alloc_and_init)
778 {
779         u32 fw_pages;
780         int rc;
781
782         q->buf_addr = buf_addr;
783         q->buf_len = buf_len;
784         q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
785         if (IS_ERR(q->umem)) {
786                 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
787                        PTR_ERR(q->umem));
788                 return PTR_ERR(q->umem);
789         }
790
791         fw_pages = ib_umem_page_count(q->umem) <<
792             (PAGE_SHIFT - FW_PAGE_SHIFT);
793
794         rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
795         if (rc)
796                 goto err0;
797
798         if (alloc_and_init) {
799                 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
800                 if (IS_ERR(q->pbl_tbl)) {
801                         rc = PTR_ERR(q->pbl_tbl);
802                         goto err0;
803                 }
804                 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
805                                    FW_PAGE_SHIFT);
806         } else {
807                 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
808                 if (!q->pbl_tbl) {
809                         rc = -ENOMEM;
810                         goto err0;
811                 }
812         }
813
814         /* mmap the user address used to store doorbell data for recovery */
815         return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
816
817 err0:
818         ib_umem_release(q->umem);
819         q->umem = NULL;
820
821         return rc;
822 }
823
824 static inline void qedr_init_cq_params(struct qedr_cq *cq,
825                                        struct qedr_ucontext *ctx,
826                                        struct qedr_dev *dev, int vector,
827                                        int chain_entries, int page_cnt,
828                                        u64 pbl_ptr,
829                                        struct qed_rdma_create_cq_in_params
830                                        *params)
831 {
832         memset(params, 0, sizeof(*params));
833         params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
834         params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
835         params->cnq_id = vector;
836         params->cq_size = chain_entries - 1;
837         params->dpi = (ctx) ? ctx->dpi : dev->dpi;
838         params->pbl_num_pages = page_cnt;
839         params->pbl_ptr = pbl_ptr;
840         params->pbl_two_level = 0;
841 }
842
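/* Ring the CQ doorbell: the consumer index and aggregation flags are packed
 * into cq->db.raw and written with a single 64-bit store.
 */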
843 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
844 {
845         cq->db.data.agg_flags = flags;
846         cq->db.data.value = cpu_to_le32(cons);
847         writeq(cq->db.raw, cq->db_addr);
848 }
849
850 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
851 {
852         struct qedr_cq *cq = get_qedr_cq(ibcq);
853         unsigned long sflags;
854         struct qedr_dev *dev;
855
856         dev = get_qedr_dev(ibcq->device);
857
858         if (cq->destroyed) {
859                 DP_ERR(dev,
860                        "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
861                        cq, cq->icid);
862                 return -EINVAL;
863         }
864
865
866         if (cq->cq_type == QEDR_CQ_TYPE_GSI)
867                 return 0;
868
869         spin_lock_irqsave(&cq->cq_lock, sflags);
870
871         cq->arm_flags = 0;
872
873         if (flags & IB_CQ_SOLICITED)
874                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
875
876         if (flags & IB_CQ_NEXT_COMP)
877                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
878
879         doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
880
881         spin_unlock_irqrestore(&cq->cq_lock, sflags);
882
883         return 0;
884 }
885
886 int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
887                    struct ib_udata *udata)
888 {
889         struct ib_device *ibdev = ibcq->device;
890         struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
891                 udata, struct qedr_ucontext, ibucontext);
892         struct qed_rdma_destroy_cq_out_params destroy_oparams;
893         struct qed_rdma_destroy_cq_in_params destroy_iparams;
894         struct qedr_dev *dev = get_qedr_dev(ibdev);
895         struct qed_rdma_create_cq_in_params params;
896         struct qedr_create_cq_ureq ureq = {};
897         int vector = attr->comp_vector;
898         int entries = attr->cqe;
899         struct qedr_cq *cq = get_qedr_cq(ibcq);
900         int chain_entries;
901         u32 db_offset;
902         int page_cnt;
903         u64 pbl_ptr;
904         u16 icid;
905         int rc;
906
907         DP_DEBUG(dev, QEDR_MSG_INIT,
908                  "create_cq: called from %s. entries=%d, vector=%d\n",
909                  udata ? "User Lib" : "Kernel", entries, vector);
910
911         if (entries > QEDR_MAX_CQES) {
912                 DP_ERR(dev,
913                        "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
914                        entries, QEDR_MAX_CQES);
915                 return -EINVAL;
916         }
917
918         chain_entries = qedr_align_cq_entries(entries);
919         chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
920
921         /* Calculate the db offset: user space adds the DPI base, the kernel adds the db address */
922         db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
923
924         if (udata) {
925                 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
926                                                          udata->inlen))) {
927                         DP_ERR(dev,
928                                "create cq: problem copying data from user space\n");
929                         goto err0;
930                 }
931
932                 if (!ureq.len) {
933                         DP_ERR(dev,
934                                "create cq: cannot create a cq with 0 entries\n");
935                         goto err0;
936                 }
937
938                 cq->cq_type = QEDR_CQ_TYPE_USER;
939
940                 rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
941                                           ureq.len, true, IB_ACCESS_LOCAL_WRITE,
942                                           1);
943                 if (rc)
944                         goto err0;
945
946                 pbl_ptr = cq->q.pbl_tbl->pa;
947                 page_cnt = cq->q.pbl_info.num_pbes;
948
949                 cq->ibcq.cqe = chain_entries;
950                 cq->q.db_addr = ctx->dpi_addr + db_offset;
951         } else {
952                 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
953
954                 rc = dev->ops->common->chain_alloc(dev->cdev,
955                                                    QED_CHAIN_USE_TO_CONSUME,
956                                                    QED_CHAIN_MODE_PBL,
957                                                    QED_CHAIN_CNT_TYPE_U32,
958                                                    chain_entries,
959                                                    sizeof(union rdma_cqe),
960                                                    &cq->pbl, NULL);
961                 if (rc)
962                         goto err0;
963
964                 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
965                 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
966                 cq->ibcq.cqe = cq->pbl.capacity;
967         }
968
969         qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
970                             pbl_ptr, &params);
971
972         rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
973         if (rc)
974                 goto err1;
975
976         cq->icid = icid;
977         cq->sig = QEDR_CQ_MAGIC_NUMBER;
978         spin_lock_init(&cq->cq_lock);
979
980         if (udata) {
981                 rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
982                 if (rc)
983                         goto err2;
984
985                 rc = qedr_db_recovery_add(dev, cq->q.db_addr,
986                                           &cq->q.db_rec_data->db_data,
987                                           DB_REC_WIDTH_64B,
988                                           DB_REC_USER);
989                 if (rc)
990                         goto err2;
991
992         } else {
993                 /* Generate doorbell address. */
994                 cq->db.data.icid = cq->icid;
995                 cq->db_addr = dev->db_addr + db_offset;
996                 cq->db.data.params = DB_AGG_CMD_SET <<
997                     RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
998
999                 /* Point to the very last element; once we pass it we will toggle */
1000                 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
1001                 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
1002                 cq->latest_cqe = NULL;
1003                 consume_cqe(cq);
1004                 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
1005
1006                 rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
1007                                           DB_REC_WIDTH_64B, DB_REC_KERNEL);
1008                 if (rc)
1009                         goto err2;
1010         }
1011
1012         DP_DEBUG(dev, QEDR_MSG_CQ,
1013                  "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1014                  cq->icid, cq, params.cq_size);
1015
1016         return 0;
1017
1018 err2:
1019         destroy_iparams.icid = cq->icid;
1020         dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1021                                   &destroy_oparams);
1022 err1:
1023         if (udata) {
1024                 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1025                 ib_umem_release(cq->q.umem);
1026                 if (cq->q.db_mmap_entry)
1027                         rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1028         } else {
1029                 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1030         }
1031 err0:
1032         return -EINVAL;
1033 }
1034
1035 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
1036 {
1037         struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1038         struct qedr_cq *cq = get_qedr_cq(ibcq);
1039
1040         DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
1041
1042         return 0;
1043 }
1044
1045 #define QEDR_DESTROY_CQ_MAX_ITERATIONS          (10)
1046 #define QEDR_DESTROY_CQ_ITER_DURATION           (10)
1047
1048 void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1049 {
1050         struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1051         struct qed_rdma_destroy_cq_out_params oparams;
1052         struct qed_rdma_destroy_cq_in_params iparams;
1053         struct qedr_cq *cq = get_qedr_cq(ibcq);
1054         int iter;
1055
1056         DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1057
1058         cq->destroyed = 1;
1059
1060         /* GSI CQs are handled by the driver, so they don't exist in the FW */
1061         if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
1062                 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1063                 return;
1064         }
1065
1066         iparams.icid = cq->icid;
1067         dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1068         dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1069
1070         if (udata) {
1071                 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1072                 ib_umem_release(cq->q.umem);
1073
1074                 if (cq->q.db_rec_data) {
1075                         qedr_db_recovery_del(dev, cq->q.db_addr,
1076                                              &cq->q.db_rec_data->db_data);
1077                         rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1078                 }
1079         } else {
1080                 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1081         }
1082
1083         /* We don't want the IRQ handler to handle a non-existing CQ so we
1084          * wait until all CNQ interrupts, if any, are received. This will always
1085          * happen and will always happen very fast. If not, then a serious error
1086          * has occurred. That is why we can use a long delay.
1087          * We spin for a short time so we don't lose time on context switching
1088          * in case all the completions are handled in that span. Otherwise
1089          * we sleep for a while and check again. Since the CNQ may be
1090          * associated with (only) the current CPU we use msleep to allow the
1091          * current CPU to be freed.
1092          * The CNQ notification is increased in qedr_irq_handler().
1093          */
1094         iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1095         while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1096                 udelay(QEDR_DESTROY_CQ_ITER_DURATION);
1097                 iter--;
1098         }
1099
1100         iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
1101         while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1102                 msleep(QEDR_DESTROY_CQ_ITER_DURATION);
1103                 iter--;
1104         }
1105
1106         /* Note that we don't need to have explicit code to wait for the
1107          * completion of the event handler because it is invoked from the EQ.
1108          * Since the destroy CQ ramrod has also been received on the EQ we can
1109          * be certain that there's no event handler in process.
1110          * be certain that there's no event handler in progress.
1111 }
1112
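/* Resolve the source GID referenced by the AH attribute into the sgid/dgid,
 * VLAN and RoCE mode (v1, v2/IPv4 or v2/IPv6) that the qed modify-QP call
 * expects; GID dwords are converted from network to host byte order.
 */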
1113 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
1114                                           struct ib_qp_attr *attr,
1115                                           int attr_mask,
1116                                           struct qed_rdma_modify_qp_in_params
1117                                           *qp_params)
1118 {
1119         const struct ib_gid_attr *gid_attr;
1120         enum rdma_network_type nw_type;
1121         const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1122         u32 ipv4_addr;
1123         int ret;
1124         int i;
1125
1126         gid_attr = grh->sgid_attr;
1127         ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1128         if (ret)
1129                 return ret;
1130
1131         nw_type = rdma_gid_attr_network_type(gid_attr);
1132         switch (nw_type) {
1133         case RDMA_NETWORK_IPV6:
1134                 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1135                        sizeof(qp_params->sgid));
1136                 memcpy(&qp_params->dgid.bytes[0],
1137                        &grh->dgid,
1138                        sizeof(qp_params->dgid));
1139                 qp_params->roce_mode = ROCE_V2_IPV6;
1140                 SET_FIELD(qp_params->modify_flags,
1141                           QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1142                 break;
1143         case RDMA_NETWORK_IB:
1144                 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1145                        sizeof(qp_params->sgid));
1146                 memcpy(&qp_params->dgid.bytes[0],
1147                        &grh->dgid,
1148                        sizeof(qp_params->dgid));
1149                 qp_params->roce_mode = ROCE_V1;
1150                 break;
1151         case RDMA_NETWORK_IPV4:
1152                 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1153                 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1154                 ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1155                 qp_params->sgid.ipv4_addr = ipv4_addr;
1156                 ipv4_addr =
1157                     qedr_get_ipv4_from_gid(grh->dgid.raw);
1158                 qp_params->dgid.ipv4_addr = ipv4_addr;
1159                 SET_FIELD(qp_params->modify_flags,
1160                           QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1161                 qp_params->roce_mode = ROCE_V2_IPV4;
1162                 break;
1163         }
1164
1165         for (i = 0; i < 4; i++) {
1166                 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1167                 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1168         }
1169
1170         if (qp_params->vlan_id >= VLAN_CFI_MASK)
1171                 qp_params->vlan_id = 0;
1172
1173         return 0;
1174 }
1175
1176 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1177                                struct ib_qp_init_attr *attrs,
1178                                struct ib_udata *udata)
1179 {
1180         struct qedr_device_attr *qattr = &dev->attr;
1181
1182         /* QP0... attrs->qp_type == IB_QPT_GSI */
1183         if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1184                 DP_DEBUG(dev, QEDR_MSG_QP,
1185                          "create qp: unsupported qp type=0x%x requested\n",
1186                          attrs->qp_type);
1187                 return -EOPNOTSUPP;
1188         }
1189
1190         if (attrs->cap.max_send_wr > qattr->max_sqe) {
1191                 DP_ERR(dev,
1192                        "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1193                        attrs->cap.max_send_wr, qattr->max_sqe);
1194                 return -EINVAL;
1195         }
1196
1197         if (attrs->cap.max_inline_data > qattr->max_inline) {
1198                 DP_ERR(dev,
1199                        "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1200                        attrs->cap.max_inline_data, qattr->max_inline);
1201                 return -EINVAL;
1202         }
1203
1204         if (attrs->cap.max_send_sge > qattr->max_sge) {
1205                 DP_ERR(dev,
1206                        "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1207                        attrs->cap.max_send_sge, qattr->max_sge);
1208                 return -EINVAL;
1209         }
1210
1211         if (attrs->cap.max_recv_sge > qattr->max_sge) {
1212                 DP_ERR(dev,
1213                        "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1214                        attrs->cap.max_recv_sge, qattr->max_sge);
1215                 return -EINVAL;
1216         }
1217
1218         /* Unprivileged user space cannot create special QP */
1219         if (udata && attrs->qp_type == IB_QPT_GSI) {
1220                 DP_ERR(dev,
1221                        "create qp: userspace can't create special QPs of type=0x%x\n",
1222                        attrs->qp_type);
1223                 return -EINVAL;
1224         }
1225
1226         return 0;
1227 }
1228
1229 static int qedr_copy_srq_uresp(struct qedr_dev *dev,
1230                                struct qedr_srq *srq, struct ib_udata *udata)
1231 {
1232         struct qedr_create_srq_uresp uresp = {};
1233         int rc;
1234
1235         uresp.srq_id = srq->srq_id;
1236
1237         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1238         if (rc)
1239                 DP_ERR(dev, "create srq: problem copying data to user space\n");
1240
1241         return rc;
1242 }
1243
1244 static void qedr_copy_rq_uresp(struct qedr_dev *dev,
1245                               struct qedr_create_qp_uresp *uresp,
1246                               struct qedr_qp *qp)
1247 {
1248         /* iWARP requires two doorbells per RQ. */
1249         if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1250                 uresp->rq_db_offset =
1251                     DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1252                 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1253         } else {
1254                 uresp->rq_db_offset =
1255                     DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1256         }
1257
1258         uresp->rq_icid = qp->icid;
1259         if (qp->urq.db_mmap_entry)
1260                 uresp->rq_db_rec_addr =
1261                         rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
1262 }
1263
1264 static void qedr_copy_sq_uresp(struct qedr_dev *dev,
1265                                struct qedr_create_qp_uresp *uresp,
1266                                struct qedr_qp *qp)
1267 {
1268         uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1269
1270         /* iWARP uses the same cid for rq and sq */
1271         if (rdma_protocol_iwarp(&dev->ibdev, 1))
1272                 uresp->sq_icid = qp->icid;
1273         else
1274                 uresp->sq_icid = qp->icid + 1;
1275
1276         if (qp->usq.db_mmap_entry)
1277                 uresp->sq_db_rec_addr =
1278                         rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
1279 }
1280
1281 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1282                               struct qedr_qp *qp, struct ib_udata *udata,
1283                               struct qedr_create_qp_uresp *uresp)
1284 {
1285         int rc;
1286
1287         memset(uresp, 0, sizeof(*uresp));
1288         qedr_copy_sq_uresp(dev, uresp, qp);
1289         qedr_copy_rq_uresp(dev, uresp, qp);
1290
1291         uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1292         uresp->qp_id = qp->qp_id;
1293
1294         rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
1295         if (rc)
1296                 DP_ERR(dev,
1297                        "create qp: failed a copy to user space with qp icid=0x%x.\n",
1298                        qp->icid);
1299
1300         return rc;
1301 }
1302
1303 static void qedr_set_common_qp_params(struct qedr_dev *dev,
1304                                       struct qedr_qp *qp,
1305                                       struct qedr_pd *pd,
1306                                       struct ib_qp_init_attr *attrs)
1307 {
1308         spin_lock_init(&qp->q_lock);
1309         if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1310                 kref_init(&qp->refcnt);
1311                 init_completion(&qp->iwarp_cm_comp);
1312         }
1313         qp->pd = pd;
1314         qp->qp_type = attrs->qp_type;
1315         qp->max_inline_data = attrs->cap.max_inline_data;
1316         qp->sq.max_sges = attrs->cap.max_send_sge;
1317         qp->state = QED_ROCE_QP_STATE_RESET;
1318         qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1319         qp->sq_cq = get_qedr_cq(attrs->send_cq);
1320         qp->dev = dev;
1321
1322         if (attrs->srq) {
1323                 qp->srq = get_qedr_srq(attrs->srq);
1324         } else {
1325                 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1326                 qp->rq.max_sges = attrs->cap.max_recv_sge;
1327                 DP_DEBUG(dev, QEDR_MSG_QP,
1328                          "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1329                          qp->rq.max_sges, qp->rq_cq->icid);
1330         }
1331
1332         DP_DEBUG(dev, QEDR_MSG_QP,
1333                  "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1334                  pd->pd_id, qp->qp_type, qp->max_inline_data,
1335                  qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1336         DP_DEBUG(dev, QEDR_MSG_QP,
1337                  "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1338                  qp->sq.max_sges, qp->sq_cq->icid);
1339 }
1340
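/* Compute the kernel SQ (and, when no SRQ is used, RQ) doorbell addresses
 * and register them for doorbell recovery; if the RQ registration fails the
 * SQ registration is rolled back.
 */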
1341 static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1342 {
1343         int rc;
1344
1345         qp->sq.db = dev->db_addr +
1346                     DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1347         qp->sq.db_data.data.icid = qp->icid + 1;
1348         rc = qedr_db_recovery_add(dev, qp->sq.db,
1349                                   &qp->sq.db_data,
1350                                   DB_REC_WIDTH_32B,
1351                                   DB_REC_KERNEL);
1352         if (rc)
1353                 return rc;
1354
1355         if (!qp->srq) {
1356                 qp->rq.db = dev->db_addr +
1357                             DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1358                 qp->rq.db_data.data.icid = qp->icid;
1359
1360                 rc = qedr_db_recovery_add(dev, qp->rq.db,
1361                                           &qp->rq.db_data,
1362                                           DB_REC_WIDTH_32B,
1363                                           DB_REC_KERNEL);
1364                 if (rc)
1365                         qedr_db_recovery_del(dev, qp->sq.db,
1366                                              &qp->sq.db_data);
1367         }
1368
1369         return rc;
1370 }
1371
1372 static int qedr_check_srq_params(struct qedr_dev *dev,
1373                                  struct ib_srq_init_attr *attrs,
1374                                  struct ib_udata *udata)
1375 {
1376         struct qedr_device_attr *qattr = &dev->attr;
1377
1378         if (attrs->attr.max_wr > qattr->max_srq_wr) {
1379                 DP_ERR(dev,
1380                        "create srq: unsupported srq_wr=0x%x requested (max_srq_wr=0x%x)\n",
1381                        attrs->attr.max_wr, qattr->max_srq_wr);
1382                 return -EINVAL;
1383         }
1384
1385         if (attrs->attr.max_sge > qattr->max_sge) {
1386                 DP_ERR(dev,
1387                        "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
1388                        attrs->attr.max_sge, qattr->max_sge);
1389                 return -EINVAL;
1390         }
1391
1392         return 0;
1393 }
1394
1395 static void qedr_free_srq_user_params(struct qedr_srq *srq)
1396 {
1397         qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1398         ib_umem_release(srq->usrq.umem);
1399         ib_umem_release(srq->prod_umem);
1400 }
1401
1402 static void qedr_free_srq_kernel_params(struct qedr_srq *srq)
1403 {
1404         struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1405         struct qedr_dev *dev = srq->dev;
1406
1407         dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1408
1409         dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1410                           hw_srq->virt_prod_pair_addr,
1411                           hw_srq->phy_prod_pair_addr);
1412 }
1413
1414 static int qedr_init_srq_user_params(struct ib_udata *udata,
1415                                      struct qedr_srq *srq,
1416                                      struct qedr_create_srq_ureq *ureq,
1417                                      int access)
1418 {
1419         struct scatterlist *sg;
1420         int rc;
1421
1422         rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1423                                   ureq->srq_len, false, access, 1);
1424         if (rc)
1425                 return rc;
1426
1427         srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
1428                                      sizeof(struct rdma_srq_producers), access);
1429         if (IS_ERR(srq->prod_umem)) {
1430                 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1431                 ib_umem_release(srq->usrq.umem);
1432                 DP_ERR(srq->dev,
1433                        "create srq: failed ib_umem_get for producer, got %ld\n",
1434                        PTR_ERR(srq->prod_umem));
1435                 return PTR_ERR(srq->prod_umem);
1436         }
1437
1438         sg = srq->prod_umem->sg_head.sgl;
1439         srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1440
1441         return 0;
1442 }
1443
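/* Kernel SRQ: allocate a DMA-coherent producer pair and a PBL-backed chain
 * sized for max_wr WQEs of up to RDMA_MAX_SRQ_WQE_SIZE elements each.
 */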
1444 static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
1445                                         struct qedr_dev *dev,
1446                                         struct ib_srq_init_attr *init_attr)
1447 {
1448         struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1449         dma_addr_t phy_prod_pair_addr;
1450         u32 num_elems;
1451         void *va;
1452         int rc;
1453
1454         va = dma_alloc_coherent(&dev->pdev->dev,
1455                                 sizeof(struct rdma_srq_producers),
1456                                 &phy_prod_pair_addr, GFP_KERNEL);
1457         if (!va) {
1458                 DP_ERR(dev,
1459                        "create srq: failed to allocate dma memory for producer\n");
1460                 return -ENOMEM;
1461         }
1462
1463         hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1464         hw_srq->virt_prod_pair_addr = va;
1465
1466         num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1467         rc = dev->ops->common->chain_alloc(dev->cdev,
1468                                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1469                                            QED_CHAIN_MODE_PBL,
1470                                            QED_CHAIN_CNT_TYPE_U32,
1471                                            num_elems,
1472                                            QEDR_SRQ_WQE_ELEM_SIZE,
1473                                            &hw_srq->pbl, NULL);
1474         if (rc)
1475                 goto err0;
1476
1477         hw_srq->num_elems = num_elems;
1478
1479         return 0;
1480
1481 err0:
1482         dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1483                           va, phy_prod_pair_addr);
1484         return rc;
1485 }
1486
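/* Create an SRQ. For user SRQs the queue and producer pair live in user
 * memory; for kernel SRQs they are allocated here. In both cases the PBL base
 * address and producer-pair address are handed to qed via rdma_create_srq,
 * and the new SRQ is tracked in dev->srqs.
 */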
1487 int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1488                     struct ib_udata *udata)
1489 {
1490         struct qed_rdma_destroy_srq_in_params destroy_in_params;
1491         struct qed_rdma_create_srq_in_params in_params = {};
1492         struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1493         struct qed_rdma_create_srq_out_params out_params;
1494         struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
1495         struct qedr_create_srq_ureq ureq = {};
1496         u64 pbl_base_addr, phy_prod_pair_addr;
1497         struct qedr_srq_hwq_info *hw_srq;
1498         u32 page_cnt, page_size;
1499         struct qedr_srq *srq = get_qedr_srq(ibsrq);
1500         int rc = 0;
1501
1502         DP_DEBUG(dev, QEDR_MSG_QP,
1503                  "create SRQ called from %s (pd %p)\n",
1504                  (udata) ? "User lib" : "kernel", pd);
1505
1506         rc = qedr_check_srq_params(dev, init_attr, udata);
1507         if (rc)
1508                 return -EINVAL;
1509
1510         srq->dev = dev;
1511         hw_srq = &srq->hw_srq;
1512         spin_lock_init(&srq->lock);
1513
1514         hw_srq->max_wr = init_attr->attr.max_wr;
1515         hw_srq->max_sges = init_attr->attr.max_sge;
1516
1517         if (udata) {
1518                 if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1519                                                          udata->inlen))) {
1520                         DP_ERR(dev,
1521                                "create srq: problem copying data from user space\n");
1522                         goto err0;
1523                 }
1524
1525                 rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
1526                 if (rc)
1527                         goto err0;
1528
1529                 page_cnt = srq->usrq.pbl_info.num_pbes;
1530                 pbl_base_addr = srq->usrq.pbl_tbl->pa;
1531                 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1532                 page_size = PAGE_SIZE;
1533         } else {
1534                 struct qed_chain *pbl;
1535
1536                 rc = qedr_alloc_srq_kernel_params(srq, dev, init_attr);
1537                 if (rc)
1538                         goto err0;
1539
1540                 pbl = &hw_srq->pbl;
1541                 page_cnt = qed_chain_get_page_cnt(pbl);
1542                 pbl_base_addr = qed_chain_get_pbl_phys(pbl);
1543                 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1544                 page_size = QED_CHAIN_PAGE_SIZE;
1545         }
1546
1547         in_params.pd_id = pd->pd_id;
1548         in_params.pbl_base_addr = pbl_base_addr;
1549         in_params.prod_pair_addr = phy_prod_pair_addr;
1550         in_params.num_pages = page_cnt;
1551         in_params.page_size = page_size;
1552
1553         rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1554         if (rc)
1555                 goto err1;
1556
1557         srq->srq_id = out_params.srq_id;
1558
1559         if (udata) {
1560                 rc = qedr_copy_srq_uresp(dev, srq, udata);
1561                 if (rc)
1562                         goto err2;
1563         }
1564
1565         rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
1566         if (rc)
1567                 goto err2;
1568
1569         DP_DEBUG(dev, QEDR_MSG_SRQ,
1570                  "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1571         return 0;
1572
1573 err2:
1574         destroy_in_params.srq_id = srq->srq_id;
1575
1576         dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1577 err1:
1578         if (udata)
1579                 qedr_free_srq_user_params(srq);
1580         else
1581                 qedr_free_srq_kernel_params(srq);
1582 err0:
1583         return -EFAULT;
1584 }
1585
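/* Destroy an SRQ: remove it from the xarray, destroy it in qed and release
 * either the user or the kernel resources.
 */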
1586 void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1587 {
1588         struct qed_rdma_destroy_srq_in_params in_params = {};
1589         struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1590         struct qedr_srq *srq = get_qedr_srq(ibsrq);
1591
1592         xa_erase_irq(&dev->srqs, srq->srq_id);
1593         in_params.srq_id = srq->srq_id;
1594         dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1595
1596         if (ibsrq->uobject)
1597                 qedr_free_srq_user_params(srq);
1598         else
1599                 qedr_free_srq_kernel_params(srq);
1600
1601         DP_DEBUG(dev, QEDR_MSG_SRQ,
1602                  "destroy srq: destroyed srq with srq_id=0x%0x\n",
1603                  srq->srq_id);
1604 }
1605
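/* Modify an SRQ. Only the SRQ limit (arm value) can be changed; resizing via
 * IB_SRQ_MAX_WR is not supported.
 */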
1606 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
1607                     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1608 {
1609         struct qed_rdma_modify_srq_in_params in_params = {};
1610         struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1611         struct qedr_srq *srq = get_qedr_srq(ibsrq);
1612         int rc;
1613
1614         if (attr_mask & IB_SRQ_MAX_WR) {
1615                 DP_ERR(dev,
1616                        "modify srq: invalid attribute mask=0x%x specified for %p\n",
1617                        attr_mask, srq);
1618                 return -EINVAL;
1619         }
1620
1621         if (attr_mask & IB_SRQ_LIMIT) {
1622                 if (attr->srq_limit >= srq->hw_srq.max_wr) {
1623                         DP_ERR(dev,
1624                                "modify srq: invalid srq_limit=0x%x (max_srq_limit=0x%x)\n",
1625                                attr->srq_limit, srq->hw_srq.max_wr);
1626                         return -EINVAL;
1627                 }
1628
1629                 in_params.srq_id = srq->srq_id;
1630                 in_params.wqe_limit = attr->srq_limit;
1631                 rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1632                 if (rc)
1633                         return rc;
1634         }
1635
1636         srq->srq_limit = attr->srq_limit;
1637
1638         DP_DEBUG(dev, QEDR_MSG_SRQ,
1639                  "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1640
1641         return 0;
1642 }
1643
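/* Fill the qed create-QP parameters that are common to user and kernel QPs:
 * the async-event QP handle, PD/DPI, send/receive CQ ids and, when an SRQ is
 * attached, the SRQ id.
 */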
1644 static inline void
1645 qedr_init_common_qp_in_params(struct qedr_dev *dev,
1646                               struct qedr_pd *pd,
1647                               struct qedr_qp *qp,
1648                               struct ib_qp_init_attr *attrs,
1649                               bool fmr_and_reserved_lkey,
1650                               struct qed_rdma_create_qp_in_params *params)
1651 {
1652         /* QP handle to be written in an async event */
1653         params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1654         params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1655
1656         params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1657         params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1658         params->pd = pd->pd_id;
1659         params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1660         params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1661         params->stats_queue = 0;
1662         params->srq_id = 0;
1663         params->use_srq = false;
1664
1665         if (!qp->srq) {
1666                 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1667
1668         } else {
1669                 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1670                 params->srq_id = qp->srq->srq_id;
1671                 params->use_srq = true;
1672         }
1673 }
1674
1675 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1676 {
1677         DP_DEBUG(dev, QEDR_MSG_QP, "create qp: successfully created user QP. "
1678                  "qp=%p. "
1679                  "sq_addr=0x%llx, "
1680                  "sq_len=%zd, "
1681                  "rq_addr=0x%llx, "
1682                  "rq_len=%zd"
1683                  "\n",
1684                  qp,
1685                  qp->usq.buf_addr,
1686                  qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1687 }
1688
1689 static inline void
1690 qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
1691                             struct qedr_qp *qp,
1692                             struct qed_rdma_create_qp_out_params *out_params)
1693 {
1694         qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1695         qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1696
1697         qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1698                            &qp->usq.pbl_info, FW_PAGE_SHIFT);
1699         if (!qp->srq) {
1700                 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1701                 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1702         }
1703
1704         qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1705                            &qp->urq.pbl_info, FW_PAGE_SHIFT);
1706 }
1707
1708 static void qedr_cleanup_user(struct qedr_dev *dev,
1709                               struct qedr_ucontext *ctx,
1710                               struct qedr_qp *qp)
1711 {
1712         ib_umem_release(qp->usq.umem);
1713         qp->usq.umem = NULL;
1714
1715         ib_umem_release(qp->urq.umem);
1716         qp->urq.umem = NULL;
1717
1718         if (rdma_protocol_roce(&dev->ibdev, 1)) {
1719                 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1720                 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1721         } else {
1722                 kfree(qp->usq.pbl_tbl);
1723                 kfree(qp->urq.pbl_tbl);
1724         }
1725
1726         if (qp->usq.db_rec_data) {
1727                 qedr_db_recovery_del(dev, qp->usq.db_addr,
1728                                      &qp->usq.db_rec_data->db_data);
1729                 rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1730         }
1731
1732         if (qp->urq.db_rec_data) {
1733                 qedr_db_recovery_del(dev, qp->urq.db_addr,
1734                                      &qp->urq.db_rec_data->db_data);
1735                 rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1736         }
1737
1738         if (rdma_protocol_iwarp(&dev->ibdev, 1))
1739                 qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1740                                      &qp->urq.db_rec_db2_data);
1741 }
1742
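/* Create a QP whose SQ/RQ buffers were allocated by the user library: pin the
 * buffers and build their PBLs, create the QP in qed, return the doorbell
 * offsets to user space via the uresp and register the doorbells for doorbell
 * recovery.
 */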
1743 static int qedr_create_user_qp(struct qedr_dev *dev,
1744                                struct qedr_qp *qp,
1745                                struct ib_pd *ibpd,
1746                                struct ib_udata *udata,
1747                                struct ib_qp_init_attr *attrs)
1748 {
1749         struct qed_rdma_create_qp_in_params in_params;
1750         struct qed_rdma_create_qp_out_params out_params;
1751         struct qedr_pd *pd = get_qedr_pd(ibpd);
1752         struct qedr_create_qp_uresp uresp;
1753         struct qedr_ucontext *ctx = NULL;
1754         struct qedr_create_qp_ureq ureq;
1755         int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1756         int rc = -EINVAL;
1757
1758         qp->create_type = QEDR_QP_CREATE_USER;
1759         memset(&ureq, 0, sizeof(ureq));
1760         rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq), udata->inlen));
1761         if (rc) {
1762                 DP_ERR(dev, "Problem copying data from user space\n");
1763                 return rc;
1764         }
1765
1766         /* SQ - read access only (0) */
1767         rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1768                                   ureq.sq_len, true, 0, alloc_and_init);
1769         if (rc)
1770                 return rc;
1771
1772         if (!qp->srq) {
1773                 /* RQ - read access only (0) */
1774                 rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1775                                           ureq.rq_len, true, 0, alloc_and_init);
1776                 if (rc)
1777                         return rc;
1778         }
1779
1780         memset(&in_params, 0, sizeof(in_params));
1781         qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
1782         in_params.qp_handle_lo = ureq.qp_handle_lo;
1783         in_params.qp_handle_hi = ureq.qp_handle_hi;
1784         in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1785         in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1786         if (!qp->srq) {
1787                 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1788                 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1789         }
1790
1791         qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1792                                               &in_params, &out_params);
1793
1794         if (!qp->qed_qp) {
1795                 rc = -ENOMEM;
1796                 goto err1;
1797         }
1798
1799         if (rdma_protocol_iwarp(&dev->ibdev, 1))
1800                 qedr_iwarp_populate_user_qp(dev, qp, &out_params);
1801
1802         qp->qp_id = out_params.qp_id;
1803         qp->icid = out_params.icid;
1804
1805         rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
1806         if (rc)
1807                 goto err;
1808
1809         /* The db offset was calculated in qedr_copy_qp_uresp(); now set it in the user queues */
1810         ctx = pd->uctx;
1811         qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1812         qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1813
1814         if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1815                 qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1816
1817                 /* Calculate the db_rec_db2 data here since it is constant,
1818                  * so there is no need to reflect it from user space.
1819                  */
1820                 qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1821                 qp->urq.db_rec_db2_data.data.value =
1822                         cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
1823         }
1824
1825         rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1826                                   &qp->usq.db_rec_data->db_data,
1827                                   DB_REC_WIDTH_32B,
1828                                   DB_REC_USER);
1829         if (rc)
1830                 goto err;
1831
1832         rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1833                                   &qp->urq.db_rec_data->db_data,
1834                                   DB_REC_WIDTH_32B,
1835                                   DB_REC_USER);
1836         if (rc)
1837                 goto err;
1838
1839         if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1840                 rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1841                                           &qp->urq.db_rec_db2_data,
1842                                           DB_REC_WIDTH_32B,
1843                                           DB_REC_USER);
1844                 if (rc)
1845                         goto err;
1846         }
1847         qedr_qp_user_print(dev, qp);
1848
1849         return rc;
1850 err:
1851         rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1852         if (rc)
1853                 DP_ERR(dev, "create qp: fatal fault. rc=%d\n", rc);
1854
1855 err1:
1856         qedr_cleanup_user(dev, ctx, qp);
1857         return rc;
1858 }
1859
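/* Kernel iWARP QP doorbells: SQ producer, RQ producer and the second RQ
 * doorbell (TCM flags), all registered for doorbell recovery.
 */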
1860 static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1861 {
1862         int rc;
1863
1864         qp->sq.db = dev->db_addr +
1865             DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1866         qp->sq.db_data.data.icid = qp->icid;
1867
1868         rc = qedr_db_recovery_add(dev, qp->sq.db,
1869                                   &qp->sq.db_data,
1870                                   DB_REC_WIDTH_32B,
1871                                   DB_REC_KERNEL);
1872         if (rc)
1873                 return rc;
1874
1875         qp->rq.db = dev->db_addr +
1876                     DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
1877         qp->rq.db_data.data.icid = qp->icid;
1878         qp->rq.iwarp_db2 = dev->db_addr +
1879                            DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1880         qp->rq.iwarp_db2_data.data.icid = qp->icid;
1881         qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1882
1883         rc = qedr_db_recovery_add(dev, qp->rq.db,
1884                                   &qp->rq.db_data,
1885                                   DB_REC_WIDTH_32B,
1886                                   DB_REC_KERNEL);
1887         if (rc)
1888                 return rc;
1889
1890         rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
1891                                   &qp->rq.iwarp_db2_data,
1892                                   DB_REC_WIDTH_32B,
1893                                   DB_REC_KERNEL);
1894         return rc;
1895 }
1896
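/* RoCE kernel QP: allocate the SQ/RQ chains first and pass their PBLs to qed
 * when creating the QP.
 */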
1897 static int
1898 qedr_roce_create_kernel_qp(struct qedr_dev *dev,
1899                            struct qedr_qp *qp,
1900                            struct qed_rdma_create_qp_in_params *in_params,
1901                            u32 n_sq_elems, u32 n_rq_elems)
1902 {
1903         struct qed_rdma_create_qp_out_params out_params;
1904         int rc;
1905
1906         rc = dev->ops->common->chain_alloc(dev->cdev,
1907                                            QED_CHAIN_USE_TO_PRODUCE,
1908                                            QED_CHAIN_MODE_PBL,
1909                                            QED_CHAIN_CNT_TYPE_U32,
1910                                            n_sq_elems,
1911                                            QEDR_SQE_ELEMENT_SIZE,
1912                                            &qp->sq.pbl, NULL);
1913
1914         if (rc)
1915                 return rc;
1916
1917         in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1918         in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1919
1920         rc = dev->ops->common->chain_alloc(dev->cdev,
1921                                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1922                                            QED_CHAIN_MODE_PBL,
1923                                            QED_CHAIN_CNT_TYPE_U32,
1924                                            n_rq_elems,
1925                                            QEDR_RQE_ELEMENT_SIZE,
1926                                            &qp->rq.pbl, NULL);
1927         if (rc)
1928                 return rc;
1929
1930         in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1931         in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1932
1933         qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1934                                               in_params, &out_params);
1935
1936         if (!qp->qed_qp)
1937                 return -EINVAL;
1938
1939         qp->qp_id = out_params.qp_id;
1940         qp->icid = out_params.icid;
1941
1942         return qedr_set_roce_db_info(dev, qp);
1943 }
1944
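/* iWARP kernel QP: the ordering is reversed relative to RoCE - the QP is
 * created in qed first and the SQ/RQ chains are then allocated on top of the
 * PBL addresses returned in out_params.
 */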
1945 static int
1946 qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
1947                             struct qedr_qp *qp,
1948                             struct qed_rdma_create_qp_in_params *in_params,
1949                             u32 n_sq_elems, u32 n_rq_elems)
1950 {
1951         struct qed_rdma_create_qp_out_params out_params;
1952         struct qed_chain_ext_pbl ext_pbl;
1953         int rc;
1954
1955         in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
1956                                                      QEDR_SQE_ELEMENT_SIZE,
1957                                                      QED_CHAIN_MODE_PBL);
1958         in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
1959                                                      QEDR_RQE_ELEMENT_SIZE,
1960                                                      QED_CHAIN_MODE_PBL);
1961
1962         qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1963                                               in_params, &out_params);
1964
1965         if (!qp->qed_qp)
1966                 return -EINVAL;
1967
1968         /* Now allocate the chains, using the PBL addresses returned in out_params */
1969         ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1970         ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
1971
1972         rc = dev->ops->common->chain_alloc(dev->cdev,
1973                                            QED_CHAIN_USE_TO_PRODUCE,
1974                                            QED_CHAIN_MODE_PBL,
1975                                            QED_CHAIN_CNT_TYPE_U32,
1976                                            n_sq_elems,
1977                                            QEDR_SQE_ELEMENT_SIZE,
1978                                            &qp->sq.pbl, &ext_pbl);
1979
1980         if (rc)
1981                 goto err;
1982
1983         ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1984         ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
1985
1986         rc = dev->ops->common->chain_alloc(dev->cdev,
1987                                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1988                                            QED_CHAIN_MODE_PBL,
1989                                            QED_CHAIN_CNT_TYPE_U32,
1990                                            n_rq_elems,
1991                                            QEDR_RQE_ELEMENT_SIZE,
1992                                            &qp->rq.pbl, &ext_pbl);
1993
1994         if (rc)
1995                 goto err;
1996
1997         qp->qp_id = out_params.qp_id;
1998         qp->icid = out_params.icid;
1999
2000         return qedr_set_iwarp_db_info(dev, qp);
2001
2002 err:
2003         dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2004
2005         return rc;
2006 }
2007
2008 static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
2009 {
2010         dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2011         kfree(qp->wqe_wr_id);
2012
2013         dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2014         kfree(qp->rqe_wr_id);
2015
2016         /* GSI QPs are not registered for db recovery, so there is nothing to delete */
2017         if (qp->qp_type == IB_QPT_GSI)
2018                 return;
2019
2020         qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
2021
2022         if (!qp->srq) {
2023                 qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
2024
2025                 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2026                         qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2027                                              &qp->rq.iwarp_db2_data);
2028         }
2029 }
2030
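/* Create a kernel QP: allocate the SQ/RQ shadow arrays used to track posted
 * WRs, fill the common qed parameters and dispatch to the RoCE or iWARP
 * specific creation path.
 */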
2031 static int qedr_create_kernel_qp(struct qedr_dev *dev,
2032                                  struct qedr_qp *qp,
2033                                  struct ib_pd *ibpd,
2034                                  struct ib_qp_init_attr *attrs)
2035 {
2036         struct qed_rdma_create_qp_in_params in_params;
2037         struct qedr_pd *pd = get_qedr_pd(ibpd);
2038         int rc = -EINVAL;
2039         u32 n_rq_elems;
2040         u32 n_sq_elems;
2041         u32 n_sq_entries;
2042
2043         memset(&in_params, 0, sizeof(in_params));
2044         qp->create_type = QEDR_QP_CREATE_KERNEL;
2045
2046         /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
2047          * the ring. The ring should allow at least a single WR, even if the
2048          * user requested none, due to allocation issues.
2049          * We should add an extra WR since the prod and cons indices of
2050          * wqe_wr_id are managed in such a way that the WQ is considered full
2051          * when (prod+1)%max_wr==cons. We currently don't do that because we
2052          * double the number of entries due to an iSER issue that pushes far
2053          * more WRs than indicated. If we declined its ib_post_send() we would
2054          * get error prints in dmesg that we'd like to avoid.
2055          */
2056         qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2057                               dev->attr.max_sqe);
2058
2059         qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
2060                                 GFP_KERNEL);
2061         if (!qp->wqe_wr_id) {
2062                 DP_ERR(dev, "create qp: failed SQ shadow memory allocation\n");
2063                 return -ENOMEM;
2064         }
2065
2066         /* QP handle to be written in CQE */
2067         in_params.qp_handle_lo = lower_32_bits((uintptr_t) qp);
2068         in_params.qp_handle_hi = upper_32_bits((uintptr_t) qp);
2069
2070         /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
2071          * the ring. The ring should allow at least a single WR, even if the
2072          * user requested none, due to allocation issues.
2073          */
2074         qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
2075
2076         /* Allocate driver internal RQ array */
2077         qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
2078                                 GFP_KERNEL);
2079         if (!qp->rqe_wr_id) {
2080                 DP_ERR(dev,
2081                        "create qp: failed RQ shadow memory allocation\n");
2082                 kfree(qp->wqe_wr_id);
2083                 return -ENOMEM;
2084         }
2085
2086         qedr_init_common_qp_in_params(dev, pd, qp, attrs, true, &in_params);
2087
2088         n_sq_entries = attrs->cap.max_send_wr;
2089         n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2090         n_sq_entries = max_t(u32, n_sq_entries, 1);
2091         n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2092
2093         n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
2094
2095         if (rdma_protocol_iwarp(&dev->ibdev, 1))
2096                 rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
2097                                                  n_sq_elems, n_rq_elems);
2098         else
2099                 rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
2100                                                 n_sq_elems, n_rq_elems);
2101         if (rc)
2102                 qedr_cleanup_kernel(dev, qp);
2103
2104         return rc;
2105 }
2106
2107 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
2108                              struct ib_qp_init_attr *attrs,
2109                              struct ib_udata *udata)
2110 {
2111         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2112         struct qedr_pd *pd = get_qedr_pd(ibpd);
2113         struct qedr_qp *qp;
2114         struct ib_qp *ibqp;
2115         int rc = 0;
2116
2117         DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
2118                  udata ? "user library" : "kernel", pd);
2119
2120         rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
2121         if (rc)
2122                 return ERR_PTR(rc);
2123
2124         DP_DEBUG(dev, QEDR_MSG_QP,
2125                  "create qp: called from %s, event_handler=%p, eepd=%p sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
2126                  udata ? "user library" : "kernel", attrs->event_handler, pd,
2127                  get_qedr_cq(attrs->send_cq),
2128                  get_qedr_cq(attrs->send_cq)->icid,
2129                  get_qedr_cq(attrs->recv_cq),
2130                  attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2131
2132         qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2133         if (!qp) {
2134                 DP_ERR(dev, "create qp: failed allocating memory\n");
2135                 return ERR_PTR(-ENOMEM);
2136         }
2137
2138         qedr_set_common_qp_params(dev, qp, pd, attrs);
2139
2140         if (attrs->qp_type == IB_QPT_GSI) {
2141                 ibqp = qedr_create_gsi_qp(dev, attrs, qp);
2142                 if (IS_ERR(ibqp))
2143                         kfree(qp);
2144                 return ibqp;
2145         }
2146
2147         if (udata)
2148                 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
2149         else
2150                 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
2151
2152         if (rc)
2153                 goto err;
2154
2155         qp->ibqp.qp_num = qp->qp_id;
2156
2157         if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2158                 rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
2159                 if (rc)
2160                         goto err;
2161         }
2162
2163         return &qp->ibqp;
2164
2165 err:
2166         kfree(qp);
2167
2168         return ERR_PTR(-EFAULT);
2169 }
2170
2171 static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
2172 {
2173         switch (qp_state) {
2174         case QED_ROCE_QP_STATE_RESET:
2175                 return IB_QPS_RESET;
2176         case QED_ROCE_QP_STATE_INIT:
2177                 return IB_QPS_INIT;
2178         case QED_ROCE_QP_STATE_RTR:
2179                 return IB_QPS_RTR;
2180         case QED_ROCE_QP_STATE_RTS:
2181                 return IB_QPS_RTS;
2182         case QED_ROCE_QP_STATE_SQD:
2183                 return IB_QPS_SQD;
2184         case QED_ROCE_QP_STATE_ERR:
2185                 return IB_QPS_ERR;
2186         case QED_ROCE_QP_STATE_SQE:
2187                 return IB_QPS_SQE;
2188         }
2189         return IB_QPS_ERR;
2190 }
2191
2192 static enum qed_roce_qp_state qedr_get_state_from_ibqp(
2193                                         enum ib_qp_state qp_state)
2194 {
2195         switch (qp_state) {
2196         case IB_QPS_RESET:
2197                 return QED_ROCE_QP_STATE_RESET;
2198         case IB_QPS_INIT:
2199                 return QED_ROCE_QP_STATE_INIT;
2200         case IB_QPS_RTR:
2201                 return QED_ROCE_QP_STATE_RTR;
2202         case IB_QPS_RTS:
2203                 return QED_ROCE_QP_STATE_RTS;
2204         case IB_QPS_SQD:
2205                 return QED_ROCE_QP_STATE_SQD;
2206         case IB_QPS_ERR:
2207                 return QED_ROCE_QP_STATE_ERR;
2208         default:
2209                 return QED_ROCE_QP_STATE_ERR;
2210         }
2211 }
2212
2213 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
2214 {
2215         qed_chain_reset(&qph->pbl);
2216         qph->prod = 0;
2217         qph->cons = 0;
2218         qph->wqe_cons = 0;
2219         qph->db_data.data.value = cpu_to_le16(0);
2220 }
2221
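/* Driver-side QP state machine: on RESET->INIT the HW queue bookkeeping is
 * reset, on INIT->RTR the RQ doorbell is rung (RoCE only, in case receives
 * were posted before the transition) and unsupported transitions are
 * rejected with -EINVAL.
 */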
2222 static int qedr_update_qp_state(struct qedr_dev *dev,
2223                                 struct qedr_qp *qp,
2224                                 enum qed_roce_qp_state cur_state,
2225                                 enum qed_roce_qp_state new_state)
2226 {
2227         int status = 0;
2228
2229         if (new_state == cur_state)
2230                 return 0;
2231
2232         switch (cur_state) {
2233         case QED_ROCE_QP_STATE_RESET:
2234                 switch (new_state) {
2235                 case QED_ROCE_QP_STATE_INIT:
2236                         qp->prev_wqe_size = 0;
2237                         qedr_reset_qp_hwq_info(&qp->sq);
2238                         qedr_reset_qp_hwq_info(&qp->rq);
2239                         break;
2240                 default:
2241                         status = -EINVAL;
2242                         break;
2243                 }
2244                 break;
2245         case QED_ROCE_QP_STATE_INIT:
2246                 switch (new_state) {
2247                 case QED_ROCE_QP_STATE_RTR:
2248                         /* Update doorbell (in case post_recv was
2249                          * done before move to RTR)
2250                          * done before the move to RTR)
2251
2252                         if (rdma_protocol_roce(&dev->ibdev, 1)) {
2253                                 writel(qp->rq.db_data.raw, qp->rq.db);
2254                         }
2255                         break;
2256                 case QED_ROCE_QP_STATE_ERR:
2257                         break;
2258                 default:
2259                         /* Invalid state change. */
2260                         status = -EINVAL;
2261                         break;
2262                 }
2263                 break;
2264         case QED_ROCE_QP_STATE_RTR:
2265                 /* RTR->XXX */
2266                 switch (new_state) {
2267                 case QED_ROCE_QP_STATE_RTS:
2268                         break;
2269                 case QED_ROCE_QP_STATE_ERR:
2270                         break;
2271                 default:
2272                         /* Invalid state change. */
2273                         status = -EINVAL;
2274                         break;
2275                 }
2276                 break;
2277         case QED_ROCE_QP_STATE_RTS:
2278                 /* RTS->XXX */
2279                 switch (new_state) {
2280                 case QED_ROCE_QP_STATE_SQD:
2281                         break;
2282                 case QED_ROCE_QP_STATE_ERR:
2283                         break;
2284                 default:
2285                         /* Invalid state change. */
2286                         status = -EINVAL;
2287                         break;
2288                 }
2289                 break;
2290         case QED_ROCE_QP_STATE_SQD:
2291                 /* SQD->XXX */
2292                 switch (new_state) {
2293                 case QED_ROCE_QP_STATE_RTS:
2294                 case QED_ROCE_QP_STATE_ERR:
2295                         break;
2296                 default:
2297                         /* Invalid state change. */
2298                         status = -EINVAL;
2299                         break;
2300                 }
2301                 break;
2302         case QED_ROCE_QP_STATE_ERR:
2303                 /* ERR->XXX */
2304                 switch (new_state) {
2305                 case QED_ROCE_QP_STATE_RESET:
2306                         if ((qp->rq.prod != qp->rq.cons) ||
2307                             (qp->sq.prod != qp->sq.cons)) {
2308                                 DP_NOTICE(dev,
2309                                           "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2310                                           qp->rq.prod, qp->rq.cons, qp->sq.prod,
2311                                           qp->sq.cons);
2312                                 status = -EINVAL;
2313                         }
2314                         break;
2315                 default:
2316                         status = -EINVAL;
2317                         break;
2318                 }
2319                 break;
2320         default:
2321                 status = -EINVAL;
2322                 break;
2323         }
2324
2325         return status;
2326 }
2327
2328 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2329                    int attr_mask, struct ib_udata *udata)
2330 {
2331         struct qedr_qp *qp = get_qedr_qp(ibqp);
2332         struct qed_rdma_modify_qp_in_params qp_params = { 0 };
2333         struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2334         const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2335         enum ib_qp_state old_qp_state, new_qp_state;
2336         enum qed_roce_qp_state cur_state;
2337         int rc = 0;
2338
2339         DP_DEBUG(dev, QEDR_MSG_QP,
2340                  "modify qp: qp %p attr_mask=0x%x, state=%d\n", qp, attr_mask,
2341                  attr->qp_state);
2342
2343         old_qp_state = qedr_get_ibqp_state(qp->state);
2344         if (attr_mask & IB_QP_STATE)
2345                 new_qp_state = attr->qp_state;
2346         else
2347                 new_qp_state = old_qp_state;
2348
2349         if (rdma_protocol_roce(&dev->ibdev, 1)) {
2350                 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2351                                         ibqp->qp_type, attr_mask)) {
2352                         DP_ERR(dev,
2353                                "modify qp: invalid attribute mask=0x%x specified for\n"
2354                                "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
2355                                attr_mask, qp->qp_id, ibqp->qp_type,
2356                                old_qp_state, new_qp_state);
2357                         rc = -EINVAL;
2358                         goto err;
2359                 }
2360         }
2361
2362         /* Translate the masks... */
2363         if (attr_mask & IB_QP_STATE) {
2364                 SET_FIELD(qp_params.modify_flags,
2365                           QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
2366                 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2367         }
2368
2369         if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2370                 qp_params.sqd_async = true;
2371
2372         if (attr_mask & IB_QP_PKEY_INDEX) {
2373                 SET_FIELD(qp_params.modify_flags,
2374                           QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
2375                 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2376                         rc = -EINVAL;
2377                         goto err;
2378                 }
2379
2380                 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
2381         }
2382
2383         if (attr_mask & IB_QP_QKEY)
2384                 qp->qkey = attr->qkey;
2385
2386         if (attr_mask & IB_QP_ACCESS_FLAGS) {
2387                 SET_FIELD(qp_params.modify_flags,
2388                           QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
2389                 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2390                                                   IB_ACCESS_REMOTE_READ;
2391                 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2392                                                    IB_ACCESS_REMOTE_WRITE;
2393                 qp_params.incoming_atomic_en = attr->qp_access_flags &
2394                                                IB_ACCESS_REMOTE_ATOMIC;
2395         }
2396
2397         if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
2398                 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2399                         return -EINVAL;
2400
2401                 if (attr_mask & IB_QP_PATH_MTU) {
2402                         if (attr->path_mtu < IB_MTU_256 ||
2403                             attr->path_mtu > IB_MTU_4096) {
2404                                 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
2405                                 rc = -EINVAL;
2406                                 goto err;
2407                         }
2408                         qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2409                                       ib_mtu_enum_to_int(iboe_get_mtu
2410                                                          (dev->ndev->mtu)));
2411                 }
2412
2413                 if (!qp->mtu) {
2414                         qp->mtu =
2415                         ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2416                         pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2417                 }
2418
2419                 SET_FIELD(qp_params.modify_flags,
2420                           QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
2421
2422                 qp_params.traffic_class_tos = grh->traffic_class;
2423                 qp_params.flow_label = grh->flow_label;
2424                 qp_params.hop_limit_ttl = grh->hop_limit;
2425
2426                 qp->sgid_idx = grh->sgid_index;
2427
2428                 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
2429                 if (rc) {
2430                         DP_ERR(dev,
2431                                "modify qp: problems with GID index %d (rc=%d)\n",
2432                                grh->sgid_index, rc);
2433                         return rc;
2434                 }
2435
2436                 rc = qedr_get_dmac(dev, &attr->ah_attr,
2437                                    qp_params.remote_mac_addr);
2438                 if (rc)
2439                         return rc;
2440
2441                 qp_params.use_local_mac = true;
2442                 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2443
2444                 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
2445                          qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
2446                          qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
2447                 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
2448                          qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
2449                          qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
2450                 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
2451                          qp_params.remote_mac_addr);
2452
2453                 qp_params.mtu = qp->mtu;
2454                 qp_params.lb_indication = false;
2455         }
2456
2457         if (!qp_params.mtu) {
2458                 /* Stay with current MTU */
2459                 if (qp->mtu)
2460                         qp_params.mtu = qp->mtu;
2461                 else
2462                         qp_params.mtu =
2463                             ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2464         }
2465
2466         if (attr_mask & IB_QP_TIMEOUT) {
2467                 SET_FIELD(qp_params.modify_flags,
2468                           QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
2469
2470                 /* The received timeout value is an exponent used like this:
2471                  *    "12.7.34 LOCAL ACK TIMEOUT
2472                  *    Value representing the transport (ACK) timeout for use by
2473                  *    the remote, expressed as: 4.096 * 2^timeout [usec]"
2474                  * The FW expects timeout in msec so we need to divide the usec
2475                  * result by 1000. We'll approximate 1000~2^10, and 4.096 ~ 2^2,
2476                  * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2477                  * The value of zero means infinite so we use a 'max_t' to make
2478                  * sure that sub 1 msec values will be configured as 1 msec.
2479                  */
2480                 if (attr->timeout)
2481                         qp_params.ack_timeout =
2482                                         1 << max_t(int, attr->timeout - 8, 0);
2483                 else
2484                         qp_params.ack_timeout = 0;
2485         }
2486
2487         if (attr_mask & IB_QP_RETRY_CNT) {
2488                 SET_FIELD(qp_params.modify_flags,
2489                           QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
2490                 qp_params.retry_cnt = attr->retry_cnt;
2491         }
2492
2493         if (attr_mask & IB_QP_RNR_RETRY) {
2494                 SET_FIELD(qp_params.modify_flags,
2495                           QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
2496                 qp_params.rnr_retry_cnt = attr->rnr_retry;
2497         }
2498
2499         if (attr_mask & IB_QP_RQ_PSN) {
2500                 SET_FIELD(qp_params.modify_flags,
2501                           QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
2502                 qp_params.rq_psn = attr->rq_psn;
2503                 qp->rq_psn = attr->rq_psn;
2504         }
2505
2506         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2507                 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2508                         rc = -EINVAL;
2509                         DP_ERR(dev,
2510                                "unsupported max_rd_atomic=%d, supported=%d\n",
2511                                attr->max_rd_atomic,
2512                                dev->attr.max_qp_req_rd_atomic_resc);
2513                         goto err;
2514                 }
2515
2516                 SET_FIELD(qp_params.modify_flags,
2517                           QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
2518                 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2519         }
2520
2521         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2522                 SET_FIELD(qp_params.modify_flags,
2523                           QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
2524                 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2525         }
2526
2527         if (attr_mask & IB_QP_SQ_PSN) {
2528                 SET_FIELD(qp_params.modify_flags,
2529                           QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
2530                 qp_params.sq_psn = attr->sq_psn;
2531                 qp->sq_psn = attr->sq_psn;
2532         }
2533
2534         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2535                 if (attr->max_dest_rd_atomic >
2536                     dev->attr.max_qp_resp_rd_atomic_resc) {
2537                         DP_ERR(dev,
2538                                "unsupported max_dest_rd_atomic=%d, supported=%d\n",
2539                                attr->max_dest_rd_atomic,
2540                                dev->attr.max_qp_resp_rd_atomic_resc);
2541
2542                         rc = -EINVAL;
2543                         goto err;
2544                 }
2545
2546                 SET_FIELD(qp_params.modify_flags,
2547                           QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
2548                 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2549         }
2550
2551         if (attr_mask & IB_QP_DEST_QPN) {
2552                 SET_FIELD(qp_params.modify_flags,
2553                           QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
2554
2555                 qp_params.dest_qp = attr->dest_qp_num;
2556                 qp->dest_qp_num = attr->dest_qp_num;
2557         }
2558
2559         cur_state = qp->state;
2560
2561         /* Update the QP state before the actual ramrod to prevent a race with
2562          * the fast path. Moving the QP to the error state causes the device to
2563          * flush the CQEs; while polling, the flushed CQEs would be treated as
2564          * a potential issue if the QP were not already in the error state.
2565          */
2566         if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2567             !udata && qp_params.new_state == QED_ROCE_QP_STATE_ERR)
2568                 qp->state = QED_ROCE_QP_STATE_ERR;
2569
2570         if (qp->qp_type != IB_QPT_GSI)
2571                 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2572                                               qp->qed_qp, &qp_params);
2573
2574         if (attr_mask & IB_QP_STATE) {
2575                 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2576                         rc = qedr_update_qp_state(dev, qp, cur_state,
2577                                                   qp_params.new_state);
2578                 qp->state = qp_params.new_state;
2579         }
2580
2581 err:
2582         return rc;
2583 }
2584
2585 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
2586 {
2587         int ib_qp_acc_flags = 0;
2588
2589         if (params->incoming_rdma_write_en)
2590                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
2591         if (params->incoming_rdma_read_en)
2592                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
2593         if (params->incoming_atomic_en)
2594                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
2595         ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
2596         return ib_qp_acc_flags;
2597 }
2598
2599 int qedr_query_qp(struct ib_qp *ibqp,
2600                   struct ib_qp_attr *qp_attr,
2601                   int attr_mask, struct ib_qp_init_attr *qp_init_attr)
2602 {
2603         struct qed_rdma_query_qp_out_params params;
2604         struct qedr_qp *qp = get_qedr_qp(ibqp);
2605         struct qedr_dev *dev = qp->dev;
2606         int rc = 0;
2607
2608         memset(&params, 0, sizeof(params));
2609
2610         rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2611         if (rc)
2612                 goto err;
2613
2614         memset(qp_attr, 0, sizeof(*qp_attr));
2615         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2616
2617         qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2618         qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2619         qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2620         qp_attr->path_mig_state = IB_MIG_MIGRATED;
2621         qp_attr->rq_psn = params.rq_psn;
2622         qp_attr->sq_psn = params.sq_psn;
2623         qp_attr->dest_qp_num = params.dest_qp;
2624
2625         qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2626
2627         qp_attr->cap.max_send_wr = qp->sq.max_wr;
2628         qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2629         qp_attr->cap.max_send_sge = qp->sq.max_sges;
2630         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2631         qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2632         qp_init_attr->cap = qp_attr->cap;
2633
2634         qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2635         rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2636                         params.flow_label, qp->sgid_idx,
2637                         params.hop_limit_ttl, params.traffic_class_tos);
2638         rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2639         rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2640         rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2641         qp_attr->timeout = params.timeout;
2642         qp_attr->rnr_retry = params.rnr_retry;
2643         qp_attr->retry_cnt = params.retry_cnt;
2644         qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2645         qp_attr->pkey_index = params.pkey_index;
2646         qp_attr->port_num = 1;
2647         rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2648         rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2649         qp_attr->alt_pkey_index = 0;
2650         qp_attr->alt_port_num = 0;
2651         qp_attr->alt_timeout = 0;
2652         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2653
2654         qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2655         qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2656         qp_attr->max_rd_atomic = params.max_rd_atomic;
2657         qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2658
2659         DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2660                  qp_attr->cap.max_inline_data);
2661
2662 err:
2663         return rc;
2664 }
2665
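/* Destroy the qed QP (non-GSI only) and release the SQ/RQ resources of either
 * a user or a kernel QP.
 */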
2666 static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
2667                                   struct ib_udata *udata)
2668 {
2669         struct qedr_ucontext *ctx =
2670                 rdma_udata_to_drv_context(udata, struct qedr_ucontext,
2671                                           ibucontext);
2672         int rc;
2673
2674         if (qp->qp_type != IB_QPT_GSI) {
2675                 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2676                 if (rc)
2677                         return rc;
2678         }
2679
2680         if (qp->create_type == QEDR_QP_CREATE_USER)
2681                 qedr_cleanup_user(dev, ctx, qp);
2682         else
2683                 qedr_cleanup_kernel(dev, qp);
2684
2685         return 0;
2686 }
2687
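/* Destroy a QP. For RoCE, a connected QP is first moved to the error state so
 * outstanding work is flushed; for iWARP, any in-flight connect or disconnect
 * is waited on. The QP is then removed from the xarray (iWARP), its resources
 * are freed and the iWARP reference is dropped.
 */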
2688 int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
2689 {
2690         struct qedr_qp *qp = get_qedr_qp(ibqp);
2691         struct qedr_dev *dev = qp->dev;
2692         struct ib_qp_attr attr;
2693         int attr_mask = 0;
2694
2695         DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2696                  qp, qp->qp_type);
2697
2698         if (rdma_protocol_roce(&dev->ibdev, 1)) {
2699                 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2700                     (qp->state != QED_ROCE_QP_STATE_ERR) &&
2701                     (qp->state != QED_ROCE_QP_STATE_INIT)) {
2702
2703                         attr.qp_state = IB_QPS_ERR;
2704                         attr_mask |= IB_QP_STATE;
2705
2706                         /* Change the QP state to ERROR */
2707                         qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2708                 }
2709         } else {
2710                 /* If connection establishment has started, the WAIT_FOR_CONNECT
2711                  * bit will be on and we need to wait for the establishment
2712                  * to complete before destroying the QP.
2713                  */
2714                 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
2715                                      &qp->iwarp_cm_flags))
2716                         wait_for_completion(&qp->iwarp_cm_comp);
2717
2718                 /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
2719                  * bit will be on, and we need to wait for the disconnect to
2720                  * complete before continuing. We can use the same completion,
2721                  * iwarp_cm_comp, since this is the only place that waits for
2722                  * this completion and it is sequential. In addition,
2723                  * disconnect can't occur before the connection is fully
2724                  * established, therefore if WAIT_FOR_DISCONNECT is on it
2725                  * means WAIT_FOR_CONNECT is also on and the completion for
2726                  * CONNECT already occurred.
2727                  */
2728                 if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
2729                                      &qp->iwarp_cm_flags))
2730                         wait_for_completion(&qp->iwarp_cm_comp);
2731         }
2732
2733         if (qp->qp_type == IB_QPT_GSI)
2734                 qedr_destroy_gsi_qp(dev);
2735
2736         /* We need to remove the entry from the xarray before we release the
2737          * qp_id, to avoid a race where the qp_id is reallocated and a later
2738          * xa_insert() fails.
2739          */
2740         if (rdma_protocol_iwarp(&dev->ibdev, 1))
2741                 xa_erase(&dev->qps, qp->qp_id);
2742
2743         qedr_free_qp_resources(dev, qp, udata);
2744
2745         if (rdma_protocol_iwarp(&dev->ibdev, 1))
2746                 qedr_iw_qp_rem_ref(&qp->ibqp);
2747
2748         return 0;
2749 }
2750
2751 int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
2752                    struct ib_udata *udata)
2753 {
2754         struct qedr_ah *ah = get_qedr_ah(ibah);
2755
2756         rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
2757
2758         return 0;
2759 }
2760
2761 void qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
2762 {
2763         struct qedr_ah *ah = get_qedr_ah(ibah);
2764
2765         rdma_destroy_ah_attr(&ah->attr);
2766 }
2767
2768 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2769 {
2770         struct qedr_pbl *pbl, *tmp;
2771
2772         if (info->pbl_table)
2773                 list_add_tail(&info->pbl_table->list_entry,
2774                               &info->free_pbl_list);
2775
2776         if (!list_empty(&info->inuse_pbl_list))
2777                 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2778
2779         list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2780                 list_del(&pbl->list_entry);
2781                 qedr_free_pbl(dev, &info->pbl_info, pbl);
2782         }
2783 }
2784
2785 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2786                         size_t page_list_len, bool two_layered)
2787 {
2788         struct qedr_pbl *tmp;
2789         int rc;
2790
2791         INIT_LIST_HEAD(&info->free_pbl_list);
2792         INIT_LIST_HEAD(&info->inuse_pbl_list);
2793
2794         rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2795                                   page_list_len, two_layered);
2796         if (rc)
2797                 goto done;
2798
2799         info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2800         if (IS_ERR(info->pbl_table)) {
2801                 rc = PTR_ERR(info->pbl_table);
2802                 goto done;
2803         }
2804
2805         DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2806                  &info->pbl_table->pa);
2807
2808         /* In the usual case we use two PBLs, so allocate a second one here
2809          * and add it to the free list.
2810          */
2811         tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2812         if (IS_ERR(tmp)) {
2813                 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2814                 goto done;
2815         }
2816
2817         list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2818
2819         DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2820
2821 done:
2822         if (rc)
2823                 free_mr_info(dev, info);
2824
2825         return rc;
2826 }
2827
2828 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2829                                u64 usr_addr, int acc, struct ib_udata *udata)
2830 {
2831         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2832         struct qedr_mr *mr;
2833         struct qedr_pd *pd;
2834         int rc = -ENOMEM;
2835
2836         pd = get_qedr_pd(ibpd);
2837         DP_DEBUG(dev, QEDR_MSG_MR,
2838                  "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2839                  pd->pd_id, start, len, usr_addr, acc);
2840
2841         if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2842                 return ERR_PTR(-EINVAL);
2843
2844         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2845         if (!mr)
2846                 return ERR_PTR(rc);
2847
2848         mr->type = QEDR_MR_USER;
2849
2850         mr->umem = ib_umem_get(ibpd->device, start, len, acc);
2851         if (IS_ERR(mr->umem)) {
2852                 rc = -EFAULT;
2853                 goto err0;
2854         }
2855
2856         rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2857         if (rc)
2858                 goto err1;
2859
2860         qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2861                            &mr->info.pbl_info, PAGE_SHIFT);
2862
2863         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2864         if (rc) {
2865                 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2866                 goto err1;
2867         }
2868
2869         /* Index only, 18 bit long, lkey = itid << 8 | key */
2870         mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2871         mr->hw_mr.key = 0;
2872         mr->hw_mr.pd = pd->pd_id;
2873         mr->hw_mr.local_read = 1;
2874         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2875         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2876         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2877         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2878         mr->hw_mr.mw_bind = false;
2879         mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2880         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2881         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2882         mr->hw_mr.page_size_log = PAGE_SHIFT;
2883         mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2884         mr->hw_mr.length = len;
2885         mr->hw_mr.vaddr = usr_addr;
2886         mr->hw_mr.zbva = false;
2887         mr->hw_mr.phy_mr = false;
2888         mr->hw_mr.dma_mr = false;
2889
2890         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2891         if (rc) {
2892                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2893                 goto err2;
2894         }
2895
2896         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2897         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2898             mr->hw_mr.remote_atomic)
2899                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2900
2901         DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2902                  mr->ibmr.lkey);
2903         return &mr->ibmr;
2904
2905 err2:
2906         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2907 err1:
2908         qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2909 err0:
2910         kfree(mr);
2911         return ERR_PTR(rc);
2912 }
2913
2914 int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
2915 {
2916         struct qedr_mr *mr = get_qedr_mr(ib_mr);
2917         struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2918         int rc = 0;
2919
2920         rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2921         if (rc)
2922                 return rc;
2923
2924         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2925
2926         if (mr->type != QEDR_MR_DMA)
2927                 free_mr_info(dev, &mr->info);
2928
2929         /* it could be user registered memory. */
2930         ib_umem_release(mr->umem);
2931
2932         kfree(mr);
2933
2934         return rc;
2935 }
2936
2937 static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2938                                        int max_page_list_len)
2939 {
2940         struct qedr_pd *pd = get_qedr_pd(ibpd);
2941         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2942         struct qedr_mr *mr;
2943         int rc = -ENOMEM;
2944
2945         DP_DEBUG(dev, QEDR_MSG_MR,
2946                  "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2947                  max_page_list_len);
2948
2949         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2950         if (!mr)
2951                 return ERR_PTR(rc);
2952
2953         mr->dev = dev;
2954         mr->type = QEDR_MR_FRMR;
2955
2956         rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2957         if (rc)
2958                 goto err0;
2959
2960         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2961         if (rc) {
2962                 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2963                 goto err0;
2964         }
2965
2966         /* Index only, 18 bit long, lkey = itid << 8 | key */
2967         mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2968         mr->hw_mr.key = 0;
2969         mr->hw_mr.pd = pd->pd_id;
2970         mr->hw_mr.local_read = 1;
2971         mr->hw_mr.local_write = 0;
2972         mr->hw_mr.remote_read = 0;
2973         mr->hw_mr.remote_write = 0;
2974         mr->hw_mr.remote_atomic = 0;
2975         mr->hw_mr.mw_bind = false;
2976         mr->hw_mr.pbl_ptr = 0;
2977         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2978         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2979         mr->hw_mr.fbo = 0;
2980         mr->hw_mr.length = 0;
2981         mr->hw_mr.vaddr = 0;
2982         mr->hw_mr.zbva = false;
2983         mr->hw_mr.phy_mr = true;
2984         mr->hw_mr.dma_mr = false;
2985
2986         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2987         if (rc) {
2988                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2989                 goto err1;
2990         }
2991
2992         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2993         mr->ibmr.rkey = mr->ibmr.lkey;
2994
2995         DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2996         return mr;
2997
2998 err1:
2999         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3000 err0:
3001         kfree(mr);
3002         return ERR_PTR(rc);
3003 }
3004
3005 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
3006                             u32 max_num_sg)
3007 {
3008         struct qedr_mr *mr;
3009
3010         if (mr_type != IB_MR_TYPE_MEM_REG)
3011                 return ERR_PTR(-EINVAL);
3012
3013         mr = __qedr_alloc_mr(ibpd, max_num_sg);
3014
3015         if (IS_ERR(mr))
3016                 return ERR_PTR(-EINVAL);
3017
3018         return &mr->ibmr;
3019 }
3020
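/* ib_sg_to_pages() callback: write one page address into the next free page
 * buffer entry (PBE) of the MR's PBL.
 */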
3021 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
3022 {
3023         struct qedr_mr *mr = get_qedr_mr(ibmr);
3024         struct qedr_pbl *pbl_table;
3025         struct regpair *pbe;
3026         u32 pbes_in_page;
3027
3028         if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
3029                 DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
3030                 return -ENOMEM;
3031         }
3032
3033         DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3034                  mr->npages, addr);
3035
3036         pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3037         pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3038         pbe = (struct regpair *)pbl_table->va;
3039         pbe +=  mr->npages % pbes_in_page;
3040         pbe->lo = cpu_to_le32((u32)addr);
3041         pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
3042
3043         mr->npages++;
3044
3045         return 0;
3046 }
3047
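/* Move PBLs whose fast-reg work requests have already completed from the
 * in-use list back to the free list.
 */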
3048 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
3049 {
3050         int work = info->completed - info->completed_handled - 1;
3051
3052         DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
3053         while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3054                 struct qedr_pbl *pbl;
3055
3056                 /* Free every page list that can be freed (all those that
3057                  * were invalidated), under the assumption that if an FMR
3058                  * completed successfully, any invalidate operation issued
3059                  * before it has completed as well.
3060                  */
3061                 pbl = list_first_entry(&info->inuse_pbl_list,
3062                                        struct qedr_pbl, list_entry);
3063                 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
3064                 info->completed_handled++;
3065         }
3066 }
3067
3068 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
3069                    int sg_nents, unsigned int *sg_offset)
3070 {
3071         struct qedr_mr *mr = get_qedr_mr(ibmr);
3072
3073         mr->npages = 0;
3074
3075         handle_completed_mrs(mr->dev, &mr->info);
3076         return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
3077 }
3078
3079 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
3080 {
3081         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3082         struct qedr_pd *pd = get_qedr_pd(ibpd);
3083         struct qedr_mr *mr;
3084         int rc;
3085
3086         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3087         if (!mr)
3088                 return ERR_PTR(-ENOMEM);
3089
3090         mr->type = QEDR_MR_DMA;
3091
3092         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3093         if (rc) {
3094                 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
3095                 goto err1;
3096         }
3097
3098         /* index only, 18 bit long, lkey = itid << 8 | key */
3099         mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3100         mr->hw_mr.pd = pd->pd_id;
3101         mr->hw_mr.local_read = 1;
3102         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3103         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3104         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3105         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3106         mr->hw_mr.dma_mr = true;
3107
3108         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3109         if (rc) {
3110                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
3111                 goto err2;
3112         }
3113
3114         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3115         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3116             mr->hw_mr.remote_atomic)
3117                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3118
3119         DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3120         return &mr->ibmr;
3121
3122 err2:
3123         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3124 err1:
3125         kfree(mr);
3126         return ERR_PTR(rc);
3127 }
3128
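/* The work queue is full when advancing the producer would wrap onto the
 * consumer.
 */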
3129 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
3130 {
3131         return (((wq->prod + 1) % wq->max_wr) == wq->cons);
3132 }
3133
3134 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
3135 {
3136         int i, len = 0;
3137
3138         for (i = 0; i < num_sge; i++)
3139                 len += sg_list[i].length;
3140
3141         return len;
3142 }
3143
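/* Byte-swap every 64-bit word of a single WQE segment. */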
3144 static void swap_wqe_data64(u64 *p)
3145 {
3146         int i;
3147
3148         for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
3149                 *p = cpu_to_be64(cpu_to_le64(*p));
3150 }
3151
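/* Copy the WR's SGE payload directly into SQ WQE segments (inline send).
 * Returns the total inline data size, or 0 with *bad_wr set when the data
 * exceeds ROCE_REQ_MAX_INLINE_DATA_SIZE.
 */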
3152 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
3153                                        struct qedr_qp *qp, u8 *wqe_size,
3154                                        const struct ib_send_wr *wr,
3155                                        const struct ib_send_wr **bad_wr,
3156                                        u8 *bits, u8 bit)
3157 {
3158         u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3159         char *seg_prt, *wqe;
3160         int i, seg_siz;
3161
3162         if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
3163                 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
3164                 *bad_wr = wr;
3165                 return 0;
3166         }
3167
3168         if (!data_size)
3169                 return data_size;
3170
3171         *bits |= bit;
3172
3173         seg_prt = NULL;
3174         wqe = NULL;
3175         seg_siz = 0;
3176
3177         /* Copy data inline */
3178         for (i = 0; i < wr->num_sge; i++) {
3179                 u32 len = wr->sg_list[i].length;
3180                 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3181
3182                 while (len > 0) {
3183                         u32 cur;
3184
3185                         /* New segment required */
3186                         if (!seg_siz) {
3187                                 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3188                                 seg_prt = wqe;
3189                                 seg_siz = sizeof(struct rdma_sq_common_wqe);
3190                                 (*wqe_size)++;
3191                         }
3192
3193                         /* Calculate currently allowed length */
3194                         cur = min_t(u32, len, seg_siz);
3195                         memcpy(seg_prt, src, cur);
3196
3197                         /* Update segment variables */
3198                         seg_prt += cur;
3199                         seg_siz -= cur;
3200
3201                         /* Update sge variables */
3202                         src += cur;
3203                         len -= cur;
3204
3205                         /* Swap fully-completed segments */
3206                         if (!seg_siz)
3207                                 swap_wqe_data64((u64 *)wqe);
3208                 }
3209         }
3210
3211         /* Swap the last, partially completed segment */
3212         if (seg_siz)
3213                 swap_wqe_data64((u64 *)wqe);
3214
3215         return data_size;
3216 }
3217
3218 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)                 \
3219         do {                                                    \
3220                 DMA_REGPAIR_LE(sge->addr, vaddr);               \
3221                 (sge)->length = cpu_to_le32(vlength);           \
3222                 (sge)->flags = cpu_to_le32(vflags);             \
3223         } while (0)
3224
3225 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)                       \
3226         do {                                                    \
3227                 DMA_REGPAIR_LE(hdr->wr_id, vwr_id);             \
3228                 (hdr)->num_sges = num_sge;                      \
3229         } while (0)
3230
3231 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)                 \
3232         do {                                                    \
3233                 DMA_REGPAIR_LE(sge->addr, vaddr);               \
3234                 (sge)->length = cpu_to_le32(vlength);           \
3235                 (sge)->l_key = cpu_to_le32(vlkey);              \
3236         } while (0)
3237
3238 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
3239                                 const struct ib_send_wr *wr)
3240 {
3241         u32 data_size = 0;
3242         int i;
3243
3244         for (i = 0; i < wr->num_sge; i++) {
3245                 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3246
3247                 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3248                 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3249                 sge->length = cpu_to_le32(wr->sg_list[i].length);
3250                 data_size += wr->sg_list[i].length;
3251         }
3252
3253         if (wqe_size)
3254                 *wqe_size += wr->num_sge;
3255
3256         return data_size;
3257 }
3258
3259 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
3260                                      struct qedr_qp *qp,
3261                                      struct rdma_sq_rdma_wqe_1st *rwqe,
3262                                      struct rdma_sq_rdma_wqe_2nd *rwqe2,
3263                                      const struct ib_send_wr *wr,
3264                                      const struct ib_send_wr **bad_wr)
3265 {
3266         rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3267         DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3268
3269         if (wr->send_flags & IB_SEND_INLINE &&
3270             (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3271              wr->opcode == IB_WR_RDMA_WRITE)) {
3272                 u8 flags = 0;
3273
3274                 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
3275                 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3276                                                    bad_wr, &rwqe->flags, flags);
3277         }
3278
3279         return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3280 }
3281
3282 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
3283                                      struct qedr_qp *qp,
3284                                      struct rdma_sq_send_wqe_1st *swqe,
3285                                      struct rdma_sq_send_wqe_2st *swqe2,
3286                                      const struct ib_send_wr *wr,
3287                                      const struct ib_send_wr **bad_wr)
3288 {
3289         memset(swqe2, 0, sizeof(*swqe2));
3290         if (wr->send_flags & IB_SEND_INLINE) {
3291                 u8 flags = 0;
3292
3293                 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
3294                 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3295                                                    bad_wr, &swqe->flags, flags);
3296         }
3297
3298         return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3299 }
3300
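/* Build the two-element fast-MR (IB_WR_REG_MR) WQE: the first element carries
 * the MR's iova and key, the second carries the access flags, page size and
 * PBL address.
 */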
3301 static int qedr_prepare_reg(struct qedr_qp *qp,
3302                             struct rdma_sq_fmr_wqe_1st *fwqe1,
3303                             const struct ib_reg_wr *wr)
3304 {
3305         struct qedr_mr *mr = get_qedr_mr(wr->mr);
3306         struct rdma_sq_fmr_wqe_2nd *fwqe2;
3307
3308         fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3309         fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3310         fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3311         fwqe1->l_key = wr->key;
3312
3313         fwqe2->access_ctrl = 0;
3314
3315         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3316                    !!(wr->access & IB_ACCESS_REMOTE_READ));
3317         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3318                    !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3319         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3320                    !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3321         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3322         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3323                    !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3324         fwqe2->fmr_ctrl = 0;
3325
3326         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3327                    ilog2(mr->ibmr.page_size) - 12);
3328
3329         fwqe2->length_hi = 0;
3330         fwqe2->length_lo = mr->ibmr.length;
3331         fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3332         fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3333
3334         qp->wqe_wr_id[qp->sq.prod].mr = mr;
3335
3336         return 0;
3337 }
3338
3339 static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
3340 {
3341         switch (opcode) {
3342         case IB_WR_RDMA_WRITE:
3343         case IB_WR_RDMA_WRITE_WITH_IMM:
3344                 return IB_WC_RDMA_WRITE;
3345         case IB_WR_SEND_WITH_IMM:
3346         case IB_WR_SEND:
3347         case IB_WR_SEND_WITH_INV:
3348                 return IB_WC_SEND;
3349         case IB_WR_RDMA_READ:
3350         case IB_WR_RDMA_READ_WITH_INV:
3351                 return IB_WC_RDMA_READ;
3352         case IB_WR_ATOMIC_CMP_AND_SWP:
3353                 return IB_WC_COMP_SWAP;
3354         case IB_WR_ATOMIC_FETCH_AND_ADD:
3355                 return IB_WC_FETCH_ADD;
3356         case IB_WR_REG_MR:
3357                 return IB_WC_REG_MR;
3358         case IB_WR_LOCAL_INV:
3359                 return IB_WC_LOCAL_INV;
3360         default:
3361                 return IB_WC_SEND;
3362         }
3363 }
3364
3365 static inline bool qedr_can_post_send(struct qedr_qp *qp,
3366                                       const struct ib_send_wr *wr)
3367 {
3368         int wq_is_full, err_wr, pbl_is_full;
3369         struct qedr_dev *dev = qp->dev;
3370
3371         /* prevent SQ overflow and/or processing of a bad WR */
3372         err_wr = wr->num_sge > qp->sq.max_sges;
3373         wq_is_full = qedr_wq_is_full(&qp->sq);
3374         pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3375                       QEDR_MAX_SQE_ELEMENTS_PER_SQE;
3376         if (wq_is_full || err_wr || pbl_is_full) {
3377                 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3378                         DP_ERR(dev,
3379                                "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
3380                                qp);
3381                         qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3382                 }
3383
3384                 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3385                         DP_ERR(dev,
3386                                "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
3387                                qp);
3388                         qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3389                 }
3390
3391                 if (pbl_is_full &&
3392                     !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3393                         DP_ERR(dev,
3394                                "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
3395                                qp);
3396                         qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3397                 }
3398                 return false;
3399         }
3400         return true;
3401 }
3402
3403 static int __qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3404                             const struct ib_send_wr **bad_wr)
3405 {
3406         struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3407         struct qedr_qp *qp = get_qedr_qp(ibqp);
3408         struct rdma_sq_atomic_wqe_1st *awqe1;
3409         struct rdma_sq_atomic_wqe_2nd *awqe2;
3410         struct rdma_sq_atomic_wqe_3rd *awqe3;
3411         struct rdma_sq_send_wqe_2st *swqe2;
3412         struct rdma_sq_local_inv_wqe *iwqe;
3413         struct rdma_sq_rdma_wqe_2nd *rwqe2;
3414         struct rdma_sq_send_wqe_1st *swqe;
3415         struct rdma_sq_rdma_wqe_1st *rwqe;
3416         struct rdma_sq_fmr_wqe_1st *fwqe1;
3417         struct rdma_sq_common_wqe *wqe;
3418         u32 length;
3419         int rc = 0;
3420         bool comp;
3421
3422         if (!qedr_can_post_send(qp, wr)) {
3423                 *bad_wr = wr;
3424                 return -ENOMEM;
3425         }
3426
3427         wqe = qed_chain_produce(&qp->sq.pbl);
3428         qp->wqe_wr_id[qp->sq.prod].signaled =
3429                 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3430
3431         wqe->flags = 0;
3432         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3433                    !!(wr->send_flags & IB_SEND_SOLICITED));
3434         comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3435         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3436         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3437                    !!(wr->send_flags & IB_SEND_FENCE));
3438         wqe->prev_wqe_size = qp->prev_wqe_size;
3439
3440         qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3441
3442         switch (wr->opcode) {
3443         case IB_WR_SEND_WITH_IMM:
3444                 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3445                         rc = -EINVAL;
3446                         *bad_wr = wr;
3447                         break;
3448                 }
3449                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3450                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3451                 swqe->wqe_size = 2;
3452                 swqe2 = qed_chain_produce(&qp->sq.pbl);
3453
3454                 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3455                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3456                                                    wr, bad_wr);
3457                 swqe->length = cpu_to_le32(length);
3458                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3459                 qp->prev_wqe_size = swqe->wqe_size;
3460                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3461                 break;
3462         case IB_WR_SEND:
3463                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3464                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3465
3466                 swqe->wqe_size = 2;
3467                 swqe2 = qed_chain_produce(&qp->sq.pbl);
3468                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3469                                                    wr, bad_wr);
3470                 swqe->length = cpu_to_le32(length);
3471                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3472                 qp->prev_wqe_size = swqe->wqe_size;
3473                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3474                 break;
3475         case IB_WR_SEND_WITH_INV:
3476                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3477                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3478                 swqe2 = qed_chain_produce(&qp->sq.pbl);
3479                 swqe->wqe_size = 2;
3480                 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3481                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
3482                                                    wr, bad_wr);
3483                 swqe->length = cpu_to_le32(length);
3484                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3485                 qp->prev_wqe_size = swqe->wqe_size;
3486                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3487                 break;
3488
3489         case IB_WR_RDMA_WRITE_WITH_IMM:
3490                 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3491                         rc = -EINVAL;
3492                         *bad_wr = wr;
3493                         break;
3494                 }
3495                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3496                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3497
3498                 rwqe->wqe_size = 2;
3499                 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3500                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3501                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3502                                                    wr, bad_wr);
3503                 rwqe->length = cpu_to_le32(length);
3504                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3505                 qp->prev_wqe_size = rwqe->wqe_size;
3506                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3507                 break;
3508         case IB_WR_RDMA_WRITE:
3509                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3510                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3511
3512                 rwqe->wqe_size = 2;
3513                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3514                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3515                                                    wr, bad_wr);
3516                 rwqe->length = cpu_to_le32(length);
3517                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3518                 qp->prev_wqe_size = rwqe->wqe_size;
3519                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3520                 break;
3521         case IB_WR_RDMA_READ_WITH_INV:
3522                 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3523                 /* fallthrough -- handled identically to RDMA READ */
3524
3525         case IB_WR_RDMA_READ:
3526                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3527                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3528
3529                 rwqe->wqe_size = 2;
3530                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3531                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
3532                                                    wr, bad_wr);
3533                 rwqe->length = cpu_to_le32(length);
3534                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3535                 qp->prev_wqe_size = rwqe->wqe_size;
3536                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3537                 break;
3538
3539         case IB_WR_ATOMIC_CMP_AND_SWP:
3540         case IB_WR_ATOMIC_FETCH_AND_ADD:
3541                 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
3542                 awqe1->wqe_size = 4;
3543
3544                 awqe2 = qed_chain_produce(&qp->sq.pbl);
3545                 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3546                 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3547
3548                 awqe3 = qed_chain_produce(&qp->sq.pbl);
3549
3550                 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3551                         wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3552                         DMA_REGPAIR_LE(awqe3->swap_data,
3553                                        atomic_wr(wr)->compare_add);
3554                 } else {
3555                         wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3556                         DMA_REGPAIR_LE(awqe3->swap_data,
3557                                        atomic_wr(wr)->swap);
3558                         DMA_REGPAIR_LE(awqe3->cmp_data,
3559                                        atomic_wr(wr)->compare_add);
3560                 }
3561
3562                 qedr_prepare_sq_sges(qp, NULL, wr);
3563
3564                 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3565                 qp->prev_wqe_size = awqe1->wqe_size;
3566                 break;
3567
3568         case IB_WR_LOCAL_INV:
3569                 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
3570                 iwqe->wqe_size = 1;
3571
3572                 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3573                 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3574                 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3575                 qp->prev_wqe_size = iwqe->wqe_size;
3576                 break;
3577         case IB_WR_REG_MR:
3578                 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
3579                 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3580                 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
3581                 fwqe1->wqe_size = 2;
3582
3583                 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
3584                 if (rc) {
3585                         DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
3586                         *bad_wr = wr;
3587                         break;
3588                 }
3589
3590                 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3591                 qp->prev_wqe_size = fwqe1->wqe_size;
3592                 break;
3593         default:
3594                 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3595                 rc = -EINVAL;
3596                 *bad_wr = wr;
3597                 break;
3598         }
3599
3600         if (*bad_wr) {
3601                 u16 value;
3602
3603                 /* Restore prod to its position before
3604                  * this WR was processed
3605                  */
3606                 value = le16_to_cpu(qp->sq.db_data.data.value);
3607                 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3608
3609                 /* Restore prev_wqe_size */
3610                 qp->prev_wqe_size = wqe->prev_wqe_size;
3611                 rc = -EINVAL;
3612                 DP_ERR(dev, "POST SEND FAILED\n");
3613         }
3614
3615         return rc;
3616 }
3617
3618 int qedr_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
3619                    const struct ib_send_wr **bad_wr)
3620 {
3621         struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3622         struct qedr_qp *qp = get_qedr_qp(ibqp);
3623         unsigned long flags;
3624         int rc = 0;
3625
3626         *bad_wr = NULL;
3627
3628         if (qp->qp_type == IB_QPT_GSI)
3629                 return qedr_gsi_post_send(ibqp, wr, bad_wr);
3630
3631         spin_lock_irqsave(&qp->q_lock, flags);
3632
3633         if (rdma_protocol_roce(&dev->ibdev, 1)) {
3634                 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3635                     (qp->state != QED_ROCE_QP_STATE_ERR) &&
3636                     (qp->state != QED_ROCE_QP_STATE_SQD)) {
3637                         spin_unlock_irqrestore(&qp->q_lock, flags);
3638                         *bad_wr = wr;
3639                         DP_DEBUG(dev, QEDR_MSG_CQ,
3640                                  "QP in wrong state! QP icid=0x%x state %d\n",
3641                                  qp->icid, qp->state);
3642                         return -EINVAL;
3643                 }
3644         }
3645
3646         while (wr) {
3647                 rc = __qedr_post_send(ibqp, wr, bad_wr);
3648                 if (rc)
3649                         break;
3650
3651                 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3652
3653                 qedr_inc_sw_prod(&qp->sq);
3654
3655                 qp->sq.db_data.data.value++;
3656
3657                 wr = wr->next;
3658         }
3659
3660         /* Trigger doorbell
3661          * If the first WR failed, the doorbell will be rung in vain.
3662          * However, this is not harmful (as long as the producer value is
3663          * unchanged); for performance reasons we avoid checking for this
3664          * redundant doorbell.
3665          *
3666          * qp->wqe_wr_id is accessed during qedr_poll_cq; as soon as we
3667          * ring the doorbell we could get a completion for this WR, so we
3668          * must make sure that the memory is updated before ringing the
3669          * doorbell.
3670          * During qedr_poll_cq, rmb is called before accessing the
3671          * cqe. This covers for the smp_rmb as well.
3672          */
3673         smp_wmb();
3674         writel(qp->sq.db_data.raw, qp->sq.db);
3675
3676         spin_unlock_irqrestore(&qp->q_lock, flags);
3677
3678         return rc;
3679 }
3680
3681 static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq)
3682 {
3683         u32 used;
3684
3685         /* Calculate the number of elements in use from the producer and
3686          * consumer counts, then subtract it from the maximum number of
3687          * work requests supported to get the number of elements left.
3688          */
3689         used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt;
3690
3691         return hw_srq->max_wr - used;
3692 }
3693
3694 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
3695                        const struct ib_recv_wr **bad_wr)
3696 {
3697         struct qedr_srq *srq = get_qedr_srq(ibsrq);
3698         struct qedr_srq_hwq_info *hw_srq;
3699         struct qedr_dev *dev = srq->dev;
3700         struct qed_chain *pbl;
3701         unsigned long flags;
3702         int status = 0;
3703         u32 num_sge;
3704         u32 offset;
3705
3706         spin_lock_irqsave(&srq->lock, flags);
3707
3708         hw_srq = &srq->hw_srq;
3709         pbl = &srq->hw_srq.pbl;
3710         while (wr) {
3711                 struct rdma_srq_wqe_header *hdr;
3712                 int i;
3713
3714                 if (!qedr_srq_elem_left(hw_srq) ||
3715                     wr->num_sge > srq->hw_srq.max_sges) {
3716                         DP_ERR(dev, "Can't post WR  (%d,%d) || (%d > %d)\n",
3717                                hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt,
3718                                wr->num_sge, srq->hw_srq.max_sges);
3719                         status = -ENOMEM;
3720                         *bad_wr = wr;
3721                         break;
3722                 }
3723
3724                 hdr = qed_chain_produce(pbl);
3725                 num_sge = wr->num_sge;
3726                 /* Set number of sge and work request id in header */
3727                 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3728
3729                 srq->hw_srq.wr_prod_cnt++;
3730                 hw_srq->wqe_prod++;
3731                 hw_srq->sge_prod++;
3732
3733                 DP_DEBUG(dev, QEDR_MSG_SRQ,
3734                          "SRQ WR: SGEs: %d with wr_id[%d] = %llx\n",
3735                          wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3736
3737                 for (i = 0; i < wr->num_sge; i++) {
3738                         struct rdma_srq_sge *srq_sge = qed_chain_produce(pbl);
3739
3740                         /* Set SGE length, lkey and address */
3741                         SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3742                                     wr->sg_list[i].length, wr->sg_list[i].lkey);
3743
3744                         DP_DEBUG(dev, QEDR_MSG_SRQ,
3745                                  "[%d]: len %d key %x addr %x:%x\n",
3746                                  i, srq_sge->length, srq_sge->l_key,
3747                                  srq_sge->addr.hi, srq_sge->addr.lo);
3748                         hw_srq->sge_prod++;
3749                 }
3750
3751                 /* Flush WQE and SGE information before
3752                  * updating producer.
3753                  */
3754                 wmb();
3755
3756                 /* The SRQ producer is 8 bytes: the SGE producer index goes
3757                  * in the first 4 bytes and the WQE producer in the next
3758                  * 4 bytes.
3759                  */
3760                 *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod;
3761                 offset = offsetof(struct rdma_srq_producers, wqe_prod);
3762                 *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) =
3763                         hw_srq->wqe_prod;
3764
3765                 /* Flush producer after updating it. */
3766                 wmb();
3767                 wr = wr->next;
3768         }
3769
3770         DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3771                  qed_chain_get_elem_left(pbl));
3772         spin_unlock_irqrestore(&srq->lock, flags);
3773
3774         return status;
3775 }
3776
3777 int qedr_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
3778                    const struct ib_recv_wr **bad_wr)
3779 {
3780         struct qedr_qp *qp = get_qedr_qp(ibqp);
3781         struct qedr_dev *dev = qp->dev;
3782         unsigned long flags;
3783         int status = 0;
3784
3785         if (qp->qp_type == IB_QPT_GSI)
3786                 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3787
3788         spin_lock_irqsave(&qp->q_lock, flags);
3789
3790         if (qp->state == QED_ROCE_QP_STATE_RESET) {
3791                 spin_unlock_irqrestore(&qp->q_lock, flags);
3792                 *bad_wr = wr;
3793                 return -EINVAL;
3794         }
3795
3796         while (wr) {
3797                 int i;
3798
3799                 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3800                     QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3801                     wr->num_sge > qp->rq.max_sges) {
3802                         DP_ERR(dev, "Can't post WR  (%d < %d) || (%d > %d)\n",
3803                                qed_chain_get_elem_left_u32(&qp->rq.pbl),
3804                                QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3805                                qp->rq.max_sges);
3806                         status = -ENOMEM;
3807                         *bad_wr = wr;
3808                         break;
3809                 }
3810                 for (i = 0; i < wr->num_sge; i++) {
3811                         u32 flags = 0;
3812                         struct rdma_rq_sge *rqe =
3813                             qed_chain_produce(&qp->rq.pbl);
3814
3815                         /* First one must include the number
3816                          * of SGE in the list
3817                          */
3818                         if (!i)
3819                                 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3820                                           wr->num_sge);
3821
3822                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
3823                                   wr->sg_list[i].lkey);
3824
3825                         RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3826                                    wr->sg_list[i].length, flags);
3827                 }
3828
3829                 /* Special case of no SGEs: the FW requires between 1 and 4
3830                  * SGEs, so post a single SGE with length zero. This is needed
3831                  * because an RDMA write with immediate consumes an RQ entry.
3832                  */
3833                 if (!wr->num_sge) {
3834                         u32 flags = 0;
3835                         struct rdma_rq_sge *rqe =
3836                             qed_chain_produce(&qp->rq.pbl);
3837
3838                         /* First one must include the number
3839                          * of SGE in the list
3840                          */
3841                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
3842                         SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
3843
3844                         RQ_SGE_SET(rqe, 0, 0, flags);
3845                         i = 1;
3846                 }
3847
3848                 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3849                 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3850
3851                 qedr_inc_sw_prod(&qp->rq);
3852
3853                 /* qp->rqe_wr_id is accessed during qedr_poll_cq; as soon as
3854                  * we ring the doorbell we could get a completion for this
3855                  * WR, so we must make sure that the memory is updated
3856                  * before ringing the doorbell.
3857                  * During qedr_poll_cq, rmb is called before accessing the
3858                  * cqe. This covers for the smp_rmb as well.
3859                  */
3860                 smp_wmb();
3861
3862                 qp->rq.db_data.data.value++;
3863
3864                 writel(qp->rq.db_data.raw, qp->rq.db);
3865
3866                 if (rdma_protocol_iwarp(&dev->ibdev, 1))
3867                         writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3869
3870                 wr = wr->next;
3871         }
3872
3873         spin_unlock_irqrestore(&qp->q_lock, flags);
3874
3875         return status;
3876 }
3877
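/* A CQE is valid (ready for processing) when its toggle bit matches the CQ's
 * current toggle value.
 */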
3878 static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
3879 {
3880         struct rdma_cqe_requester *resp_cqe = &cqe->req;
3881
3882         return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
3883                 cq->pbl_toggle;
3884 }
3885
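/* The CQE carries the QP's kernel pointer in qp_handle (hi/lo); reassemble it
 * so the completion can be matched to its QP.
 */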
3886 static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
3887 {
3888         struct rdma_cqe_requester *resp_cqe = &cqe->req;
3889         struct qedr_qp *qp;
3890
3891         qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
3892                                                    resp_cqe->qp_handle.lo,
3893                                                    u64);
3894         return qp;
3895 }
3896
3897 static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
3898 {
3899         struct rdma_cqe_requester *resp_cqe = &cqe->req;
3900
3901         return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
3902 }
3903
3904 /* Return latest CQE (needs processing) */
3905 static union rdma_cqe *get_cqe(struct qedr_cq *cq)
3906 {
3907         return cq->latest_cqe;
3908 }
3909
3910 /* For FMRs we need to increment the completed counter used by the FMR
3911  * algorithm that determines whether a PBL can be freed.
3912  * This must be done whether or not the work request was signaled, so we call
3913  * this function from the condition that checks if a WR should be skipped, to
3914  * make sure we don't miss it (this FMR operation may not have been
3915  * signaled).
3916  */
3917 static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
3918 {
3919         if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
3920                 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3921 }
3922
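/* Generate up to @num_entries work completions for SQ WRs up to @hw_cons.
 * Unsignaled WRs are skipped unless @force is set. Returns the number of WCs
 * filled in.
 */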
3923 static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
3924                        struct qedr_cq *cq, int num_entries,
3925                        struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
3926                        int force)
3927 {
3928         u16 cnt = 0;
3929
3930         while (num_entries && qp->sq.wqe_cons != hw_cons) {
3931                 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
3932                         qedr_chk_if_fmr(qp);
3933                         /* skip WC */
3934                         goto next_cqe;
3935                 }
3936
3937                 /* fill WC */
3938                 wc->status = status;
3939                 wc->vendor_err = 0;
3940                 wc->wc_flags = 0;
3941                 wc->src_qp = qp->id;
3942                 wc->qp = &qp->ibqp;
3943
3944                 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
3945                 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
3946
3947                 switch (wc->opcode) {
3948                 case IB_WC_RDMA_WRITE:
3949                         wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3950                         break;
3951                 case IB_WC_COMP_SWAP:
3952                 case IB_WC_FETCH_ADD:
3953                         wc->byte_len = 8;
3954                         break;
3955                 case IB_WC_REG_MR:
3956                         qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
3957                         break;
3958                 case IB_WC_RDMA_READ:
3959                 case IB_WC_SEND:
3960                         wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
3961                         break;
3962                 default:
3963                         break;
3964                 }
3965
3966                 num_entries--;
3967                 wc++;
3968                 cnt++;
3969 next_cqe:
3970                 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
3971                         qed_chain_consume(&qp->sq.pbl);
3972                 qedr_inc_sw_cons(&qp->sq);
3973         }
3974
3975         return cnt;
3976 }
3977
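/* Process a requester (SQ) CQE. On success or flush, complete WRs up to the
 * CQE's sq_cons; on any other error, complete the preceding WRs as successful
 * and report the error status on the failing WR.
 */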
3978 static int qedr_poll_cq_req(struct qedr_dev *dev,
3979                             struct qedr_qp *qp, struct qedr_cq *cq,
3980                             int num_entries, struct ib_wc *wc,
3981                             struct rdma_cqe_requester *req)
3982 {
3983         int cnt = 0;
3984
3985         switch (req->status) {
3986         case RDMA_CQE_REQ_STS_OK:
3987                 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3988                                   IB_WC_SUCCESS, 0);
3989                 break;
3990         case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3991                 if (qp->state != QED_ROCE_QP_STATE_ERR)
3992                         DP_DEBUG(dev, QEDR_MSG_CQ,
3993                                  "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3994                                  cq->icid, qp->icid);
3995                 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3996                                   IB_WC_WR_FLUSH_ERR, 1);
3997                 break;
3998         default:
3999                 /* process all WQEs before the consumer */
4000                 qp->state = QED_ROCE_QP_STATE_ERR;
4001                 cnt = process_req(dev, qp, cq, num_entries, wc,
4002                                   req->sq_cons - 1, IB_WC_SUCCESS, 0);
4003                 wc += cnt;
4004                 /* if we have an extra WC, fill it with the actual error info */
4005                 if (cnt < num_entries) {
4006                         enum ib_wc_status wc_status;
4007
4008                         switch (req->status) {
4009                         case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
4010                                 DP_ERR(dev,
4011                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4012                                        cq->icid, qp->icid);
4013                                 wc_status = IB_WC_BAD_RESP_ERR;
4014                                 break;
4015                         case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
4016                                 DP_ERR(dev,
4017                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4018                                        cq->icid, qp->icid);
4019                                 wc_status = IB_WC_LOC_LEN_ERR;
4020                                 break;
4021                         case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
4022                                 DP_ERR(dev,
4023                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4024                                        cq->icid, qp->icid);
4025                                 wc_status = IB_WC_LOC_QP_OP_ERR;
4026                                 break;
4027                         case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
4028                                 DP_ERR(dev,
4029                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4030                                        cq->icid, qp->icid);
4031                                 wc_status = IB_WC_LOC_PROT_ERR;
4032                                 break;
4033                         case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
4034                                 DP_ERR(dev,
4035                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4036                                        cq->icid, qp->icid);
4037                                 wc_status = IB_WC_MW_BIND_ERR;
4038                                 break;
4039                         case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
4040                                 DP_ERR(dev,
4041                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4042                                        cq->icid, qp->icid);
4043                                 wc_status = IB_WC_REM_INV_REQ_ERR;
4044                                 break;
4045                         case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
4046                                 DP_ERR(dev,
4047                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4048                                        cq->icid, qp->icid);
4049                                 wc_status = IB_WC_REM_ACCESS_ERR;
4050                                 break;
4051                         case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
4052                                 DP_ERR(dev,
4053                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4054                                        cq->icid, qp->icid);
4055                                 wc_status = IB_WC_REM_OP_ERR;
4056                                 break;
4057                         case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
4058                                 DP_ERR(dev,
4059                                        "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4060                                        cq->icid, qp->icid);
4061                                 wc_status = IB_WC_RNR_RETRY_EXC_ERR;
4062                                 break;
4063                         case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
4064                                 DP_ERR(dev,
4065                                        "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4066                                        cq->icid, qp->icid);
4067                                 wc_status = IB_WC_RETRY_EXC_ERR;
4068                                 break;
4069                         default:
4070                                 DP_ERR(dev,
4071                                        "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4072                                        cq->icid, qp->icid);
4073                                 wc_status = IB_WC_GENERAL_ERR;
4074                         }
4075                         cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4076                                            wc_status, 1);
4077                 }
4078         }
4079
4080         return cnt;
4081 }
4082
static inline int qedr_cqe_resp_status_to_ib(u8 status)
{
        switch (status) {
        case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
                return IB_WC_LOC_ACCESS_ERR;
        case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
                return IB_WC_LOC_LEN_ERR;
        case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
                return IB_WC_LOC_QP_OP_ERR;
        case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
                return IB_WC_LOC_PROT_ERR;
        case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
                return IB_WC_MW_BIND_ERR;
        case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
                return IB_WC_REM_INV_RD_REQ_ERR;
        case RDMA_CQE_RESP_STS_OK:
                return IB_WC_SUCCESS;
        default:
                return IB_WC_GENERAL_ERR;
        }
}

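/* Fill in a successful receive completion from the responder CQE: copy the
 * byte count and translate the immediate-data / invalidate flags into the
 * matching ib_wc flags and opcode. Returns -EINVAL if the CQE carries a
 * flag combination that is not valid (e.g. both immediate and invalidate).
 */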
static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
                                          struct ib_wc *wc)
{
        wc->status = IB_WC_SUCCESS;
        wc->byte_len = le32_to_cpu(resp->length);

        if (resp->flags & QEDR_RESP_IMM) {
                wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
                wc->wc_flags |= IB_WC_WITH_IMM;

                if (resp->flags & QEDR_RESP_RDMA)
                        wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;

                if (resp->flags & QEDR_RESP_INV)
                        return -EINVAL;

        } else if (resp->flags & QEDR_RESP_INV) {
                wc->ex.invalidate_rkey = le32_to_cpu(resp->imm_data_or_inv_r_Key);
                wc->wc_flags |= IB_WC_WITH_INVALIDATE;

                if (resp->flags & QEDR_RESP_RDMA)
                        return -EINVAL;

        } else if (resp->flags & QEDR_RESP_RDMA) {
                return -EINVAL;
        }

        return 0;
}

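/* Translate one responder CQE into the work completion at @wc. Successful
 * CQEs are expanded via qedr_set_ok_cqe_resp_wc(); error statuses are mapped
 * through qedr_cqe_resp_status_to_ib().
 */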
static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
                               struct qedr_cq *cq, struct ib_wc *wc,
                               struct rdma_cqe_responder *resp, u64 wr_id)
{
        /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
        wc->opcode = IB_WC_RECV;
        wc->wc_flags = 0;

        if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
                if (qedr_set_ok_cqe_resp_wc(resp, wc))
                        DP_ERR(dev,
                               "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
                               cq, cq->icid, resp->flags);

        } else {
                wc->status = qedr_cqe_resp_status_to_ib(resp->status);
                if (wc->status == IB_WC_GENERAL_ERR)
                        DP_ERR(dev,
                               "CQ %p (icid=%d) contains an invalid CQE status %d\n",
                               cq, cq->icid, resp->status);
        }

        /* Fill the rest of the WC */
        wc->vendor_err = 0;
        wc->src_qp = qp->id;
        wc->qp = &qp->ibqp;
        wc->wr_id = wr_id;
}

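/* Complete one receive posted to the SRQ. The wr_id is carried inside the
 * CQE itself (srq_wr_id) rather than in a per-QP receive queue, and the SRQ
 * consumer counter is advanced for every completion, including flushes.
 */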
static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp,
                                struct qedr_cq *cq, struct ib_wc *wc,
                                struct rdma_cqe_responder *resp)
{
        struct qedr_srq *srq = qp->srq;
        u64 wr_id;

        wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
                         le32_to_cpu(resp->srq_wr_id.lo), u64);

        if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
                wc->status = IB_WC_WR_FLUSH_ERR;
                wc->vendor_err = 0;
                wc->byte_len = 0;
                wc->src_qp = qp->id;
                wc->qp = &qp->ibqp;
                wc->wr_id = wr_id;
        } else {
                __process_resp_one(dev, qp, cq, wc, resp, wr_id);
        }
        srq->hw_srq.wr_cons_cnt++;

        return 1;
}

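/* Complete one receive posted to the regular RQ: the wr_id comes from the
 * software ring (rqe_wr_id), and the consumed WQE elements are released from
 * the RQ page-based linked list.
 */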
static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
                            struct qedr_cq *cq, struct ib_wc *wc,
                            struct rdma_cqe_responder *resp)
{
        u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;

        __process_resp_one(dev, qp, cq, wc, resp, wr_id);

        while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
                qed_chain_consume(&qp->rq.pbl);
        qedr_inc_sw_cons(&qp->rq);

        return 1;
}

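/* Flush outstanding receives after the QP has moved to the error state:
 * generate an IB_WC_WR_FLUSH_ERR completion for every RQ WQE up to the
 * hardware consumer index @hw_cons, bounded by @num_entries. Returns the
 * number of completions written.
 */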
static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
                              int num_entries, struct ib_wc *wc, u16 hw_cons)
{
        u16 cnt = 0;

        while (num_entries && qp->rq.wqe_cons != hw_cons) {
                /* fill WC */
                wc->status = IB_WC_WR_FLUSH_ERR;
                wc->vendor_err = 0;
                wc->wc_flags = 0;
                wc->src_qp = qp->id;
                wc->byte_len = 0;
                wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
                wc->qp = &qp->ibqp;
                num_entries--;
                wc++;
                cnt++;
                while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
                        qed_chain_consume(&qp->rq.pbl);
                qedr_inc_sw_cons(&qp->rq);
        }

        return cnt;
}

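/* Retire a responder CQE once software has caught up with the RQ consumer
 * index reported by the CQE, and remember that the CQ doorbell must be rung.
 */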
static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
                                 struct rdma_cqe_responder *resp, int *update)
{
        if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
                consume_cqe(cq);
                *update |= 1;
        }
}

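/* Handle a responder CQE that completed against an SRQ. SRQ CQEs do not
 * track a per-QP consumer index, so the CQE is always consumed here.
 */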
static int qedr_poll_cq_resp_srq(struct qedr_dev *dev, struct qedr_qp *qp,
                                 struct qedr_cq *cq, int num_entries,
                                 struct ib_wc *wc,
                                 struct rdma_cqe_responder *resp)
{
        int cnt;

        cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
        consume_cqe(cq);

        return cnt;
}

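/* Handle a responder CQE for a regular RQ. A flush-status CQE may cover
 * several RQ WQEs, so the flush path can emit multiple work completions
 * before the CQE itself is retired.
 */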
static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
                             struct qedr_cq *cq, int num_entries,
                             struct ib_wc *wc, struct rdma_cqe_responder *resp,
                             int *update)
{
        int cnt;

        if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
                cnt = process_resp_flush(qp, cq, num_entries, wc,
                                         le16_to_cpu(resp->rq_cons_or_srq_id));
                try_consume_resp_cqe(cq, qp, resp, update);
        } else {
                cnt = process_resp_one(dev, qp, cq, wc, resp);
                consume_cqe(cq);
                *update |= 1;
        }

        return cnt;
}

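/* Retire a requester CQE once all SQ WQEs it covers have been completed,
 * and remember that the CQ doorbell must be rung.
 */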
static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
                                struct rdma_cqe_requester *req, int *update)
{
        if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
                consume_cqe(cq);
                *update |= 1;
        }
}

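/* ib_poll_cq() entry point. Walks valid CQEs under cq_lock, dispatches them
 * by type (requester, responder RQ, responder SRQ) and rings the consumer
 * doorbell once for the whole batch. GSI CQs are handled by the dedicated
 * qedr_gsi_poll_cq() path; polling an already-destroyed CQ only logs a
 * warning and returns zero completions.
 */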
int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct qedr_dev *dev = get_qedr_dev(ibcq->device);
        struct qedr_cq *cq = get_qedr_cq(ibcq);
        union rdma_cqe *cqe;
        u32 old_cons, new_cons;
        unsigned long flags;
        int update = 0;
        int done = 0;

        if (cq->destroyed) {
                DP_ERR(dev,
                       "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
                       cq, cq->icid);
                return 0;
        }

        if (cq->cq_type == QEDR_CQ_TYPE_GSI)
                return qedr_gsi_poll_cq(ibcq, num_entries, wc);

        spin_lock_irqsave(&cq->cq_lock, flags);
        cqe = cq->latest_cqe;
        old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
        while (num_entries && is_valid_cqe(cq, cqe)) {
                struct qedr_qp *qp;
                int cnt = 0;

                /* prevent speculative reads of any field of CQE */
                rmb();

                qp = cqe_get_qp(cqe);
                if (!qp) {
                        WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
                        break;
                }

                wc->qp = &qp->ibqp;

                switch (cqe_get_type(cqe)) {
                case RDMA_CQE_TYPE_REQUESTER:
                        cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
                                               &cqe->req);
                        try_consume_req_cqe(cq, qp, &cqe->req, &update);
                        break;
                case RDMA_CQE_TYPE_RESPONDER_RQ:
                        cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
                                                &cqe->resp, &update);
                        break;
                case RDMA_CQE_TYPE_RESPONDER_SRQ:
                        cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
                                                    wc, &cqe->resp);
                        update = 1;
                        break;
                case RDMA_CQE_TYPE_INVALID:
                default:
                        DP_ERR(dev, "Error: invalid CQE type = %d\n",
                               cqe_get_type(cqe));
                }
                num_entries -= cnt;
                wc += cnt;
                done += cnt;

                cqe = get_cqe(cq);
        }
        new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);

        cq->cq_cons += new_cons - old_cons;

        if (update)
                /* doorbell notifies about the latest VALID entry,
                 * but the chain already points to the next INVALID one
                 */
                doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);

        spin_unlock_irqrestore(&cq->cq_lock, flags);
        return done;
}

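/* MAD processing stub: qedr does not implement a device-specific MAD agent,
 * so every MAD is reported back to the IB core as successfully handled.
 */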
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
                     u8 port_num, const struct ib_wc *in_wc,
                     const struct ib_grh *in_grh, const struct ib_mad *in,
                     struct ib_mad *out_mad, size_t *out_mad_size,
                     u16 *out_mad_pkey_index)
{
        return IB_MAD_RESULT_SUCCESS;
}