drivers/infiniband/hw/efa/efa_verbs.c
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
4  */
5
6 #include <linux/vmalloc.h>
7
8 #include <rdma/ib_addr.h>
9 #include <rdma/ib_umem.h>
10 #include <rdma/ib_user_verbs.h>
11 #include <rdma/ib_verbs.h>
12 #include <rdma/uverbs_ioctl.h>
13
14 #include "efa.h"
15
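/*
 * Types of user mappings handed out through rdma_user_mmap entries (see
 * qp_mmap_entries_setup() and cq_mmap_entries_setup() below):
 * EFA_MMAP_DMA_PAGE - kernel-allocated, DMA-mapped queue buffers (RQ, CQ);
 * EFA_MMAP_IO_WC    - device memory mapped write-combined (LLQ descriptors);
 * EFA_MMAP_IO_NC    - device memory mapped non-cached (doorbell registers).
 */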
16 enum {
17         EFA_MMAP_DMA_PAGE = 0,
18         EFA_MMAP_IO_WC,
19         EFA_MMAP_IO_NC,
20 };
21
22 #define EFA_AENQ_ENABLED_GROUPS \
23         (BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
24          BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
25
26 struct efa_user_mmap_entry {
27         struct rdma_user_mmap_entry rdma_entry;
28         u64 address;
29         u8 mmap_flag;
30 };
31
32 #define EFA_DEFINE_STATS(op) \
33         op(EFA_TX_BYTES, "tx_bytes") \
34         op(EFA_TX_PKTS, "tx_pkts") \
35         op(EFA_RX_BYTES, "rx_bytes") \
36         op(EFA_RX_PKTS, "rx_pkts") \
37         op(EFA_RX_DROPS, "rx_drops") \
38         op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
39         op(EFA_COMPLETED_CMDS, "completed_cmds") \
40         op(EFA_CMDS_ERR, "cmds_err") \
41         op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
42         op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
43         op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
44         op(EFA_CREATE_QP_ERR, "create_qp_err") \
45         op(EFA_CREATE_CQ_ERR, "create_cq_err") \
46         op(EFA_REG_MR_ERR, "reg_mr_err") \
47         op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
48         op(EFA_CREATE_AH_ERR, "create_ah_err") \
49         op(EFA_MMAP_ERR, "mmap_err")
50
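/*
 * EFA_DEFINE_STATS() is an X-macro: expanding it once with EFA_STATS_ENUM and
 * once with EFA_STATS_STR keeps the efa_hw_stats enum and the
 * efa_stats_names[] table in sync from a single list of counters.
 */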
51 #define EFA_STATS_ENUM(ename, name) ename,
52 #define EFA_STATS_STR(ename, name) [ename] = name,
53
54 enum efa_hw_stats {
55         EFA_DEFINE_STATS(EFA_STATS_ENUM)
56 };
57
58 static const char *const efa_stats_names[] = {
59         EFA_DEFINE_STATS(EFA_STATS_STR)
60 };
61
62 #define EFA_CHUNK_PAYLOAD_SHIFT       12
63 #define EFA_CHUNK_PAYLOAD_SIZE        BIT(EFA_CHUNK_PAYLOAD_SHIFT)
64 #define EFA_CHUNK_PAYLOAD_PTR_SIZE    8
65
66 #define EFA_CHUNK_SHIFT               12
67 #define EFA_CHUNK_SIZE                BIT(EFA_CHUNK_SHIFT)
68 #define EFA_CHUNK_PTR_SIZE            sizeof(struct efa_com_ctrl_buff_info)
69
70 #define EFA_PTRS_PER_CHUNK \
71         ((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)
72
73 #define EFA_CHUNK_USED_SIZE \
74         ((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
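/*
 * An indirect PBL is passed to the device as a linked list of 4KB chunks.
 * Each chunk holds EFA_PTRS_PER_CHUNK 8-byte page addresses followed by a
 * struct efa_com_ctrl_buff_info giving the DMA address and length of the
 * following chunk (see pbl_chunk_list_create() below).
 */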
75
76 struct pbl_chunk {
77         dma_addr_t dma_addr;
78         u64 *buf;
79         u32 length;
80 };
81
82 struct pbl_chunk_list {
83         struct pbl_chunk *chunks;
84         unsigned int size;
85 };
86
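/*
 * A page buffer list (PBL) holds the device page addresses of a registered
 * memory region. When the list itself is physically contiguous it is DMA
 * mapped as one block (phys.continuous); otherwise it is described to the
 * device through the chunk list above (phys.indirect).
 */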
87 struct pbl_context {
88         union {
89                 struct {
90                         dma_addr_t dma_addr;
91                 } continuous;
92                 struct {
93                         u32 pbl_buf_size_in_pages;
94                         struct scatterlist *sgl;
95                         int sg_dma_cnt;
96                         struct pbl_chunk_list chunk_list;
97                 } indirect;
98         } phys;
99         u64 *pbl_buf;
100         u32 pbl_buf_size_in_bytes;
101         u8 physically_continuous;
102 };
103
104 static inline struct efa_dev *to_edev(struct ib_device *ibdev)
105 {
106         return container_of(ibdev, struct efa_dev, ibdev);
107 }
108
109 static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
110 {
111         return container_of(ibucontext, struct efa_ucontext, ibucontext);
112 }
113
114 static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
115 {
116         return container_of(ibpd, struct efa_pd, ibpd);
117 }
118
119 static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
120 {
121         return container_of(ibmr, struct efa_mr, ibmr);
122 }
123
124 static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
125 {
126         return container_of(ibqp, struct efa_qp, ibqp);
127 }
128
129 static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
130 {
131         return container_of(ibcq, struct efa_cq, ibcq);
132 }
133
134 static inline struct efa_ah *to_eah(struct ib_ah *ibah)
135 {
136         return container_of(ibah, struct efa_ah, ibah);
137 }
138
139 static inline struct efa_user_mmap_entry *
140 to_emmap(struct rdma_user_mmap_entry *rdma_entry)
141 {
142         return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
143 }
144
145 static inline bool is_rdma_read_cap(struct efa_dev *dev)
146 {
147         return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
148 }
149
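/* true when every byte of a fixed-size reserved[] field is zero */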
150 #define is_reserved_cleared(reserved) \
151         !memchr_inv(reserved, 0, sizeof(reserved))
152
153 static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
154                                size_t size, enum dma_data_direction dir)
155 {
156         void *addr;
157
158         addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
159         if (!addr)
160                 return NULL;
161
162         *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
163         if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
164                 ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
165                 free_pages_exact(addr, size);
166                 return NULL;
167         }
168
169         return addr;
170 }
171
172 static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
173                             dma_addr_t dma_addr,
174                             size_t size, enum dma_data_direction dir)
175 {
176         dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
177         free_pages_exact(cpu_addr, size);
178 }
179
180 int efa_query_device(struct ib_device *ibdev,
181                      struct ib_device_attr *props,
182                      struct ib_udata *udata)
183 {
184         struct efa_com_get_device_attr_result *dev_attr;
185         struct efa_ibv_ex_query_device_resp resp = {};
186         struct efa_dev *dev = to_edev(ibdev);
187         int err;
188
189         if (udata && udata->inlen &&
190             !ib_is_udata_cleared(udata, 0, udata->inlen)) {
191                 ibdev_dbg(ibdev,
192                           "Incompatible ABI params, udata not cleared\n");
193                 return -EINVAL;
194         }
195
196         dev_attr = &dev->dev_attr;
197
198         memset(props, 0, sizeof(*props));
199         props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
200         props->page_size_cap = dev_attr->page_size_cap;
201         props->vendor_id = dev->pdev->vendor;
202         props->vendor_part_id = dev->pdev->device;
203         props->hw_ver = dev->pdev->subsystem_device;
204         props->max_qp = dev_attr->max_qp;
205         props->max_cq = dev_attr->max_cq;
206         props->max_pd = dev_attr->max_pd;
207         props->max_mr = dev_attr->max_mr;
208         props->max_ah = dev_attr->max_ah;
209         props->max_cqe = dev_attr->max_cq_depth;
210         props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
211                                  dev_attr->max_rq_depth);
212         props->max_send_sge = dev_attr->max_sq_sge;
213         props->max_recv_sge = dev_attr->max_rq_sge;
214         props->max_sge_rd = dev_attr->max_wr_rdma_sge;
215         props->max_pkeys = 1;
216
217         if (udata && udata->outlen) {
218                 resp.max_sq_sge = dev_attr->max_sq_sge;
219                 resp.max_rq_sge = dev_attr->max_rq_sge;
220                 resp.max_sq_wr = dev_attr->max_sq_depth;
221                 resp.max_rq_wr = dev_attr->max_rq_depth;
222                 resp.max_rdma_size = dev_attr->max_rdma_size;
223
224                 if (is_rdma_read_cap(dev))
225                         resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
226
227                 err = ib_copy_to_udata(udata, &resp,
228                                        min(sizeof(resp), udata->outlen));
229                 if (err) {
230                         ibdev_dbg(ibdev,
231                                   "Failed to copy udata for query_device\n");
232                         return err;
233                 }
234         }
235
236         return 0;
237 }
238
239 int efa_query_port(struct ib_device *ibdev, u8 port,
240                    struct ib_port_attr *props)
241 {
242         struct efa_dev *dev = to_edev(ibdev);
243
244         props->lmc = 1;
245
246         props->state = IB_PORT_ACTIVE;
247         props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
248         props->gid_tbl_len = 1;
249         props->pkey_tbl_len = 1;
250         props->active_speed = IB_SPEED_EDR;
251         props->active_width = IB_WIDTH_4X;
252         props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
253         props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
254         props->max_msg_sz = dev->dev_attr.mtu;
255         props->max_vl_num = 1;
256
257         return 0;
258 }
259
260 int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
261                  int qp_attr_mask,
262                  struct ib_qp_init_attr *qp_init_attr)
263 {
264         struct efa_dev *dev = to_edev(ibqp->device);
265         struct efa_com_query_qp_params params = {};
266         struct efa_com_query_qp_result result;
267         struct efa_qp *qp = to_eqp(ibqp);
268         int err;
269
270 #define EFA_QUERY_QP_SUPP_MASK \
271         (IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
272          IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP)
273
274         if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
275                 ibdev_dbg(&dev->ibdev,
276                           "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
277                           qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
278                 return -EOPNOTSUPP;
279         }
280
281         memset(qp_attr, 0, sizeof(*qp_attr));
282         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
283
284         params.qp_handle = qp->qp_handle;
285         err = efa_com_query_qp(&dev->edev, &params, &result);
286         if (err)
287                 return err;
288
289         qp_attr->qp_state = result.qp_state;
290         qp_attr->qkey = result.qkey;
291         qp_attr->sq_psn = result.sq_psn;
292         qp_attr->sq_draining = result.sq_draining;
293         qp_attr->port_num = 1;
294
295         qp_attr->cap.max_send_wr = qp->max_send_wr;
296         qp_attr->cap.max_recv_wr = qp->max_recv_wr;
297         qp_attr->cap.max_send_sge = qp->max_send_sge;
298         qp_attr->cap.max_recv_sge = qp->max_recv_sge;
299         qp_attr->cap.max_inline_data = qp->max_inline_data;
300
301         qp_init_attr->qp_type = ibqp->qp_type;
302         qp_init_attr->recv_cq = ibqp->recv_cq;
303         qp_init_attr->send_cq = ibqp->send_cq;
304         qp_init_attr->qp_context = ibqp->qp_context;
305         qp_init_attr->cap = qp_attr->cap;
306
307         return 0;
308 }
309
310 int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
311                   union ib_gid *gid)
312 {
313         struct efa_dev *dev = to_edev(ibdev);
314
315         memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
316
317         return 0;
318 }
319
320 int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
321                    u16 *pkey)
322 {
323         if (index > 0)
324                 return -EINVAL;
325
326         *pkey = 0xffff;
327         return 0;
328 }
329
330 static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
331 {
332         struct efa_com_dealloc_pd_params params = {
333                 .pdn = pdn,
334         };
335
336         return efa_com_dealloc_pd(&dev->edev, &params);
337 }
338
339 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
340 {
341         struct efa_dev *dev = to_edev(ibpd->device);
342         struct efa_ibv_alloc_pd_resp resp = {};
343         struct efa_com_alloc_pd_result result;
344         struct efa_pd *pd = to_epd(ibpd);
345         int err;
346
347         if (udata->inlen &&
348             !ib_is_udata_cleared(udata, 0, udata->inlen)) {
349                 ibdev_dbg(&dev->ibdev,
350                           "Incompatible ABI params, udata not cleared\n");
351                 err = -EINVAL;
352                 goto err_out;
353         }
354
355         err = efa_com_alloc_pd(&dev->edev, &result);
356         if (err)
357                 goto err_out;
358
359         pd->pdn = result.pdn;
360         resp.pdn = result.pdn;
361
362         if (udata->outlen) {
363                 err = ib_copy_to_udata(udata, &resp,
364                                        min(sizeof(resp), udata->outlen));
365                 if (err) {
366                         ibdev_dbg(&dev->ibdev,
367                                   "Failed to copy udata for alloc_pd\n");
368                         goto err_dealloc_pd;
369                 }
370         }
371
372         ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);
373
374         return 0;
375
376 err_dealloc_pd:
377         efa_pd_dealloc(dev, result.pdn);
378 err_out:
379         atomic64_inc(&dev->stats.sw_stats.alloc_pd_err);
380         return err;
381 }
382
383 void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
384 {
385         struct efa_dev *dev = to_edev(ibpd->device);
386         struct efa_pd *pd = to_epd(ibpd);
387
388         ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
389         efa_pd_dealloc(dev, pd->pdn);
390 }
391
392 static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
393 {
394         struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };
395
396         return efa_com_destroy_qp(&dev->edev, &params);
397 }
398
399 static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
400 {
401         rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
402         rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
403         rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
404         rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
405 }
406
407 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
408 {
409         struct efa_dev *dev = to_edev(ibqp->pd->device);
410         struct efa_qp *qp = to_eqp(ibqp);
411         int err;
412
413         ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
414
415         efa_qp_user_mmap_entries_remove(qp);
416
417         err = efa_destroy_qp_handle(dev, qp->qp_handle);
418         if (err)
419                 return err;
420
421         if (qp->rq_cpu_addr) {
422                 ibdev_dbg(&dev->ibdev,
423                           "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
424                           qp->rq_cpu_addr, qp->rq_size,
425                           &qp->rq_dma_addr);
426                 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
427                                 qp->rq_size, DMA_TO_DEVICE);
428         }
429
430         kfree(qp);
431         return 0;
432 }
433
434 static struct rdma_user_mmap_entry*
435 efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
436                            u64 address, size_t length,
437                            u8 mmap_flag, u64 *offset)
438 {
439         struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
440         int err;
441
442         if (!entry)
443                 return NULL;
444
445         entry->address = address;
446         entry->mmap_flag = mmap_flag;
447
448         err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
449                                           length);
450         if (err) {
451                 kfree(entry);
452                 return NULL;
453         }
454         *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
455
456         return &entry->rdma_entry;
457 }
458
459 static int qp_mmap_entries_setup(struct efa_qp *qp,
460                                  struct efa_dev *dev,
461                                  struct efa_ucontext *ucontext,
462                                  struct efa_com_create_qp_params *params,
463                                  struct efa_ibv_create_qp_resp *resp)
464 {
465         size_t length;
466         u64 address;
467
468         address = dev->db_bar_addr + resp->sq_db_offset;
469         qp->sq_db_mmap_entry =
470                 efa_user_mmap_entry_insert(&ucontext->ibucontext,
471                                            address,
472                                            PAGE_SIZE, EFA_MMAP_IO_NC,
473                                            &resp->sq_db_mmap_key);
474         if (!qp->sq_db_mmap_entry)
475                 return -ENOMEM;
476
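        /*
         * The entry maps whole pages; hand back to userspace only the
         * doorbell's offset within its page.
         */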
477         resp->sq_db_offset &= ~PAGE_MASK;
478
479         address = dev->mem_bar_addr + resp->llq_desc_offset;
480         length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
481                             (resp->llq_desc_offset & ~PAGE_MASK));
482
483         qp->llq_desc_mmap_entry =
484                 efa_user_mmap_entry_insert(&ucontext->ibucontext,
485                                            address, length,
486                                            EFA_MMAP_IO_WC,
487                                            &resp->llq_desc_mmap_key);
488         if (!qp->llq_desc_mmap_entry)
489                 goto err_remove_mmap;
490
491         resp->llq_desc_offset &= ~PAGE_MASK;
492
493         if (qp->rq_size) {
494                 address = dev->db_bar_addr + resp->rq_db_offset;
495
496                 qp->rq_db_mmap_entry =
497                         efa_user_mmap_entry_insert(&ucontext->ibucontext,
498                                                    address, PAGE_SIZE,
499                                                    EFA_MMAP_IO_NC,
500                                                    &resp->rq_db_mmap_key);
501                 if (!qp->rq_db_mmap_entry)
502                         goto err_remove_mmap;
503
504                 resp->rq_db_offset &= ~PAGE_MASK;
505
506                 address = virt_to_phys(qp->rq_cpu_addr);
507                 qp->rq_mmap_entry =
508                         efa_user_mmap_entry_insert(&ucontext->ibucontext,
509                                                    address, qp->rq_size,
510                                                    EFA_MMAP_DMA_PAGE,
511                                                    &resp->rq_mmap_key);
512                 if (!qp->rq_mmap_entry)
513                         goto err_remove_mmap;
514
515                 resp->rq_mmap_size = qp->rq_size;
516         }
517
518         return 0;
519
520 err_remove_mmap:
521         efa_qp_user_mmap_entries_remove(qp);
522
523         return -ENOMEM;
524 }
525
526 static int efa_qp_validate_cap(struct efa_dev *dev,
527                                struct ib_qp_init_attr *init_attr)
528 {
529         if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
530                 ibdev_dbg(&dev->ibdev,
531                           "qp: requested send wr[%u] exceeds the max[%u]\n",
532                           init_attr->cap.max_send_wr,
533                           dev->dev_attr.max_sq_depth);
534                 return -EINVAL;
535         }
536         if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
537                 ibdev_dbg(&dev->ibdev,
538                           "qp: requested receive wr[%u] exceeds the max[%u]\n",
539                           init_attr->cap.max_recv_wr,
540                           dev->dev_attr.max_rq_depth);
541                 return -EINVAL;
542         }
543         if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
544                 ibdev_dbg(&dev->ibdev,
545                           "qp: requested sge send[%u] exceeds the max[%u]\n",
546                           init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
547                 return -EINVAL;
548         }
549         if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
550                 ibdev_dbg(&dev->ibdev,
551                           "qp: requested sge recv[%u] exceeds the max[%u]\n",
552                           init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
553                 return -EINVAL;
554         }
555         if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
556                 ibdev_dbg(&dev->ibdev,
557                           "qp: requested inline data[%u] exceeds the max[%u]\n",
558                           init_attr->cap.max_inline_data,
559                           dev->dev_attr.inline_buf_size);
560                 return -EINVAL;
561         }
562
563         return 0;
564 }
565
566 static int efa_qp_validate_attr(struct efa_dev *dev,
567                                 struct ib_qp_init_attr *init_attr)
568 {
569         if (init_attr->qp_type != IB_QPT_DRIVER &&
570             init_attr->qp_type != IB_QPT_UD) {
571                 ibdev_dbg(&dev->ibdev,
572                           "Unsupported qp type %d\n", init_attr->qp_type);
573                 return -EOPNOTSUPP;
574         }
575
576         if (init_attr->srq) {
577                 ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
578                 return -EOPNOTSUPP;
579         }
580
581         if (init_attr->create_flags) {
582                 ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
583                 return -EOPNOTSUPP;
584         }
585
586         return 0;
587 }
588
589 struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
590                             struct ib_qp_init_attr *init_attr,
591                             struct ib_udata *udata)
592 {
593         struct efa_com_create_qp_params create_qp_params = {};
594         struct efa_com_create_qp_result create_qp_resp;
595         struct efa_dev *dev = to_edev(ibpd->device);
596         struct efa_ibv_create_qp_resp resp = {};
597         struct efa_ibv_create_qp cmd = {};
598         struct efa_ucontext *ucontext;
599         struct efa_qp *qp;
600         int err;
601
602         ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
603                                              ibucontext);
604
605         err = efa_qp_validate_cap(dev, init_attr);
606         if (err)
607                 goto err_out;
608
609         err = efa_qp_validate_attr(dev, init_attr);
610         if (err)
611                 goto err_out;
612
613         if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
614                 ibdev_dbg(&dev->ibdev,
615                           "Incompatible ABI params, no input udata\n");
616                 err = -EINVAL;
617                 goto err_out;
618         }
619
620         if (udata->inlen > sizeof(cmd) &&
621             !ib_is_udata_cleared(udata, sizeof(cmd),
622                                  udata->inlen - sizeof(cmd))) {
623                 ibdev_dbg(&dev->ibdev,
624                           "Incompatible ABI params, unknown fields in udata\n");
625                 err = -EINVAL;
626                 goto err_out;
627         }
628
629         err = ib_copy_from_udata(&cmd, udata,
630                                  min(sizeof(cmd), udata->inlen));
631         if (err) {
632                 ibdev_dbg(&dev->ibdev,
633                           "Cannot copy udata for create_qp\n");
634                 goto err_out;
635         }
636
637         if (cmd.comp_mask) {
638                 ibdev_dbg(&dev->ibdev,
639                           "Incompatible ABI params, unknown fields in udata\n");
640                 err = -EINVAL;
641                 goto err_out;
642         }
643
644         qp = kzalloc(sizeof(*qp), GFP_KERNEL);
645         if (!qp) {
646                 err = -ENOMEM;
647                 goto err_out;
648         }
649
650         create_qp_params.uarn = ucontext->uarn;
651         create_qp_params.pd = to_epd(ibpd)->pdn;
652
653         if (init_attr->qp_type == IB_QPT_UD) {
654                 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
655         } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
656                 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
657         } else {
658                 ibdev_dbg(&dev->ibdev,
659                           "Unsupported qp type %d driver qp type %d\n",
660                           init_attr->qp_type, cmd.driver_qp_type);
661                 err = -EOPNOTSUPP;
662                 goto err_free_qp;
663         }
664
665         ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
666                   init_attr->qp_type, cmd.driver_qp_type);
667         create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
668         create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
669         create_qp_params.sq_depth = init_attr->cap.max_send_wr;
670         create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;
671
672         create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
673         create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
674         qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
675         if (qp->rq_size) {
676                 qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
677                                                     qp->rq_size, DMA_TO_DEVICE);
678                 if (!qp->rq_cpu_addr) {
679                         err = -ENOMEM;
680                         goto err_free_qp;
681                 }
682
683                 ibdev_dbg(&dev->ibdev,
684                           "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
685                           qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
686                 create_qp_params.rq_base_addr = qp->rq_dma_addr;
687         }
688
689         err = efa_com_create_qp(&dev->edev, &create_qp_params,
690                                 &create_qp_resp);
691         if (err)
692                 goto err_free_mapped;
693
694         resp.sq_db_offset = create_qp_resp.sq_db_offset;
695         resp.rq_db_offset = create_qp_resp.rq_db_offset;
696         resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
697         resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
698         resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;
699
700         err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
701                                     &resp);
702         if (err)
703                 goto err_destroy_qp;
704
705         qp->qp_handle = create_qp_resp.qp_handle;
706         qp->ibqp.qp_num = create_qp_resp.qp_num;
707         qp->ibqp.qp_type = init_attr->qp_type;
708         qp->max_send_wr = init_attr->cap.max_send_wr;
709         qp->max_recv_wr = init_attr->cap.max_recv_wr;
710         qp->max_send_sge = init_attr->cap.max_send_sge;
711         qp->max_recv_sge = init_attr->cap.max_recv_sge;
712         qp->max_inline_data = init_attr->cap.max_inline_data;
713
714         if (udata->outlen) {
715                 err = ib_copy_to_udata(udata, &resp,
716                                        min(sizeof(resp), udata->outlen));
717                 if (err) {
718                         ibdev_dbg(&dev->ibdev,
719                                   "Failed to copy udata for qp[%u]\n",
720                                   create_qp_resp.qp_num);
721                         goto err_remove_mmap_entries;
722                 }
723         }
724
725         ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
726
727         return &qp->ibqp;
728
729 err_remove_mmap_entries:
730         efa_qp_user_mmap_entries_remove(qp);
731 err_destroy_qp:
732         efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
733 err_free_mapped:
734         if (qp->rq_size)
735                 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
736                                 qp->rq_size, DMA_TO_DEVICE);
737 err_free_qp:
738         kfree(qp);
739 err_out:
740         atomic64_inc(&dev->stats.sw_stats.create_qp_err);
741         return ERR_PTR(err);
742 }
743
744 static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
745                                   struct ib_qp_attr *qp_attr, int qp_attr_mask,
746                                   enum ib_qp_state cur_state,
747                                   enum ib_qp_state new_state)
748 {
749 #define EFA_MODIFY_QP_SUPP_MASK \
750         (IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
751          IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN)
752
753         if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
754                 ibdev_dbg(&dev->ibdev,
755                           "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
756                           qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
757                 return -EOPNOTSUPP;
758         }
759
760         if (!ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
761                                 qp_attr_mask)) {
762                 ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
763                 return -EINVAL;
764         }
765
766         if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
767                 ibdev_dbg(&dev->ibdev, "Can't change port num\n");
768                 return -EOPNOTSUPP;
769         }
770
771         if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
772                 ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
773                 return -EOPNOTSUPP;
774         }
775
776         return 0;
777 }
778
779 int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
780                   int qp_attr_mask, struct ib_udata *udata)
781 {
782         struct efa_dev *dev = to_edev(ibqp->device);
783         struct efa_com_modify_qp_params params = {};
784         struct efa_qp *qp = to_eqp(ibqp);
785         enum ib_qp_state cur_state;
786         enum ib_qp_state new_state;
787         int err;
788
789         if (udata->inlen &&
790             !ib_is_udata_cleared(udata, 0, udata->inlen)) {
791                 ibdev_dbg(&dev->ibdev,
792                           "Incompatible ABI params, udata not cleared\n");
793                 return -EINVAL;
794         }
795
796         cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
797                                                      qp->state;
798         new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
799
800         err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
801                                      new_state);
802         if (err)
803                 return err;
804
805         params.qp_handle = qp->qp_handle;
806
807         if (qp_attr_mask & IB_QP_STATE) {
808                 params.modify_mask |= BIT(EFA_ADMIN_QP_STATE_BIT) |
809                                       BIT(EFA_ADMIN_CUR_QP_STATE_BIT);
810                 params.cur_qp_state = qp_attr->cur_qp_state;
811                 params.qp_state = qp_attr->qp_state;
812         }
813
814         if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
815                 params.modify_mask |=
816                         BIT(EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT);
817                 params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
818         }
819
820         if (qp_attr_mask & IB_QP_QKEY) {
821                 params.modify_mask |= BIT(EFA_ADMIN_QKEY_BIT);
822                 params.qkey = qp_attr->qkey;
823         }
824
825         if (qp_attr_mask & IB_QP_SQ_PSN) {
826                 params.modify_mask |= BIT(EFA_ADMIN_SQ_PSN_BIT);
827                 params.sq_psn = qp_attr->sq_psn;
828         }
829
830         err = efa_com_modify_qp(&dev->edev, &params);
831         if (err)
832                 return err;
833
834         qp->state = new_state;
835
836         return 0;
837 }
838
839 static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
840 {
841         struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
842
843         return efa_com_destroy_cq(&dev->edev, &params);
844 }
845
846 void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
847 {
848         struct efa_dev *dev = to_edev(ibcq->device);
849         struct efa_cq *cq = to_ecq(ibcq);
850
851         ibdev_dbg(&dev->ibdev,
852                   "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
853                   cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
854
855         rdma_user_mmap_entry_remove(cq->mmap_entry);
856         efa_destroy_cq_idx(dev, cq->cq_idx);
857         efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
858                         DMA_FROM_DEVICE);
859 }
860
861 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
862                                  struct efa_ibv_create_cq_resp *resp)
863 {
864         resp->q_mmap_size = cq->size;
865         cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
866                                                     virt_to_phys(cq->cpu_addr),
867                                                     cq->size, EFA_MMAP_DMA_PAGE,
868                                                     &resp->q_mmap_key);
869         if (!cq->mmap_entry)
870                 return -ENOMEM;
871
872         return 0;
873 }
874
875 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
876                   struct ib_udata *udata)
877 {
878         struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
879                 udata, struct efa_ucontext, ibucontext);
880         struct efa_ibv_create_cq_resp resp = {};
881         struct efa_com_create_cq_params params;
882         struct efa_com_create_cq_result result;
883         struct ib_device *ibdev = ibcq->device;
884         struct efa_dev *dev = to_edev(ibdev);
885         struct efa_ibv_create_cq cmd = {};
886         struct efa_cq *cq = to_ecq(ibcq);
887         int entries = attr->cqe;
888         int err;
889
890         ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
891
892         if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
893                 ibdev_dbg(ibdev,
894                           "cq: requested entries[%u] non-positive or greater than max[%u]\n",
895                           entries, dev->dev_attr.max_cq_depth);
896                 err = -EINVAL;
897                 goto err_out;
898         }
899
900         if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
901                 ibdev_dbg(ibdev,
902                           "Incompatible ABI params, no input udata\n");
903                 err = -EINVAL;
904                 goto err_out;
905         }
906
907         if (udata->inlen > sizeof(cmd) &&
908             !ib_is_udata_cleared(udata, sizeof(cmd),
909                                  udata->inlen - sizeof(cmd))) {
910                 ibdev_dbg(ibdev,
911                           "Incompatible ABI params, unknown fields in udata\n");
912                 err = -EINVAL;
913                 goto err_out;
914         }
915
916         err = ib_copy_from_udata(&cmd, udata,
917                                  min(sizeof(cmd), udata->inlen));
918         if (err) {
919                 ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
920                 goto err_out;
921         }
922
923         if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
924                 ibdev_dbg(ibdev,
925                           "Incompatible ABI params, unknown fields in udata\n");
926                 err = -EINVAL;
927                 goto err_out;
928         }
929
930         if (!cmd.cq_entry_size) {
931                 ibdev_dbg(ibdev,
932                           "Invalid entry size [%u]\n", cmd.cq_entry_size);
933                 err = -EINVAL;
934                 goto err_out;
935         }
936
937         if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
938                 ibdev_dbg(ibdev,
939                           "Invalid number of sub cqs[%u] expected[%u]\n",
940                           cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
941                 err = -EINVAL;
942                 goto err_out;
943         }
944
945         cq->ucontext = ucontext;
946         cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
947         cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
948                                          DMA_FROM_DEVICE);
949         if (!cq->cpu_addr) {
950                 err = -ENOMEM;
951                 goto err_out;
952         }
953
954         params.uarn = cq->ucontext->uarn;
955         params.cq_depth = entries;
956         params.dma_addr = cq->dma_addr;
957         params.entry_size_in_bytes = cmd.cq_entry_size;
958         params.num_sub_cqs = cmd.num_sub_cqs;
959         err = efa_com_create_cq(&dev->edev, &params, &result);
960         if (err)
961                 goto err_free_mapped;
962
963         resp.cq_idx = result.cq_idx;
964         cq->cq_idx = result.cq_idx;
965         cq->ibcq.cqe = result.actual_depth;
966         WARN_ON_ONCE(entries != result.actual_depth);
967
968         err = cq_mmap_entries_setup(dev, cq, &resp);
969         if (err) {
970                 ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
971                           cq->cq_idx);
972                 goto err_destroy_cq;
973         }
974
975         if (udata->outlen) {
976                 err = ib_copy_to_udata(udata, &resp,
977                                        min(sizeof(resp), udata->outlen));
978                 if (err) {
979                         ibdev_dbg(ibdev,
980                                   "Failed to copy udata for create_cq\n");
981                         goto err_remove_mmap;
982                 }
983         }
984
985         ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
986                   cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
987
988         return 0;
989
990 err_remove_mmap:
991         rdma_user_mmap_entry_remove(cq->mmap_entry);
992 err_destroy_cq:
993         efa_destroy_cq_idx(dev, cq->cq_idx);
994 err_free_mapped:
995         efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
996                         DMA_FROM_DEVICE);
997
998 err_out:
999         atomic64_inc(&dev->stats.sw_stats.create_cq_err);
1000         return err;
1001 }
1002
1003 static int umem_to_page_list(struct efa_dev *dev,
1004                              struct ib_umem *umem,
1005                              u64 *page_list,
1006                              u32 hp_cnt,
1007                              u8 hp_shift)
1008 {
1009         u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
1010         struct ib_block_iter biter;
1011         unsigned int hp_idx = 0;
1012
1013         ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
1014                   hp_cnt, pages_in_hp);
1015
1016         rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
1017                             BIT(hp_shift))
1018                 page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
1019
1020         return 0;
1021 }
1022
1023 static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
1024 {
1025         struct scatterlist *sglist;
1026         struct page *pg;
1027         int i;
1028
1029         sglist = kcalloc(page_cnt, sizeof(*sglist), GFP_KERNEL);
1030         if (!sglist)
1031                 return NULL;
1032         sg_init_table(sglist, page_cnt);
1033         for (i = 0; i < page_cnt; i++) {
1034                 pg = vmalloc_to_page(buf);
1035                 if (!pg)
1036                         goto err;
1037                 sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
1038                 buf += PAGE_SIZE / sizeof(*buf);
1039         }
1040         return sglist;
1041
1042 err:
1043         kfree(sglist);
1044         return NULL;
1045 }
1046
1047 /*
1048  * Create a chunk list of the physical pages' DMA addresses from the
1049  * supplied scatter-gather list.
1050  */
1051 static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
1052 {
1053         struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1054         int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
1055         struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
1056         unsigned int chunk_list_size, chunk_idx, payload_idx;
1057         int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
1058         struct efa_com_ctrl_buff_info *ctrl_buf;
1059         u64 *cur_chunk_buf, *prev_chunk_buf;
1060         struct ib_block_iter biter;
1061         dma_addr_t dma_addr;
1062         int i;
1063
1064         /* allocate a chunk list that consists of 4KB chunks */
1065         chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
1066
1067         chunk_list->size = chunk_list_size;
1068         chunk_list->chunks = kcalloc(chunk_list_size,
1069                                      sizeof(*chunk_list->chunks),
1070                                      GFP_KERNEL);
1071         if (!chunk_list->chunks)
1072                 return -ENOMEM;
1073
1074         ibdev_dbg(&dev->ibdev,
1075                   "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
1076                   page_cnt);
1077
1078         /* allocate chunk buffers: */
1079         for (i = 0; i < chunk_list_size; i++) {
1080                 chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
1081                 if (!chunk_list->chunks[i].buf)
1082                         goto chunk_list_dealloc;
1083
1084                 chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
1085         }
1086         chunk_list->chunks[chunk_list_size - 1].length =
1087                 ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
1088                         EFA_CHUNK_PTR_SIZE;
1089
1090         /* fill the chunks with the DMA addresses of the SG list pages: */
1091         chunk_idx = 0;
1092         payload_idx = 0;
1093         cur_chunk_buf = chunk_list->chunks[0].buf;
1094         rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
1095                             EFA_CHUNK_PAYLOAD_SIZE) {
1096                 cur_chunk_buf[payload_idx++] =
1097                         rdma_block_iter_dma_address(&biter);
1098
1099                 if (payload_idx == EFA_PTRS_PER_CHUNK) {
1100                         chunk_idx++;
1101                         cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
1102                         payload_idx = 0;
1103                 }
1104         }
1105
1106         /* map the chunks for DMA (back to front) and fill each chunk's next-chunk pointer */
1107         for (i = chunk_list_size - 1; i >= 0; i--) {
1108                 dma_addr = dma_map_single(&dev->pdev->dev,
1109                                           chunk_list->chunks[i].buf,
1110                                           chunk_list->chunks[i].length,
1111                                           DMA_TO_DEVICE);
1112                 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1113                         ibdev_err(&dev->ibdev,
1114                                   "chunk[%u] dma_map_failed\n", i);
1115                         goto chunk_list_unmap;
1116                 }
1117
1118                 chunk_list->chunks[i].dma_addr = dma_addr;
1119                 ibdev_dbg(&dev->ibdev,
1120                           "chunk[%u] mapped at [%pad]\n", i, &dma_addr);
1121
1122                 if (!i)
1123                         break;
1124
1125                 prev_chunk_buf = chunk_list->chunks[i - 1].buf;
1126
1127                 ctrl_buf = (struct efa_com_ctrl_buff_info *)
1128                                 &prev_chunk_buf[EFA_PTRS_PER_CHUNK];
1129                 ctrl_buf->length = chunk_list->chunks[i].length;
1130
1131                 efa_com_set_dma_addr(dma_addr,
1132                                      &ctrl_buf->address.mem_addr_high,
1133                                      &ctrl_buf->address.mem_addr_low);
1134         }
1135
1136         return 0;
1137
1138 chunk_list_unmap:
1139         for (; i < chunk_list_size; i++) {
1140                 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1141                                  chunk_list->chunks[i].length, DMA_TO_DEVICE);
1142         }
1143 chunk_list_dealloc:
1144         for (i = 0; i < chunk_list_size; i++)
1145                 kfree(chunk_list->chunks[i].buf);
1146
1147         kfree(chunk_list->chunks);
1148         return -ENOMEM;
1149 }
1150
1151 static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1152 {
1153         struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1154         int i;
1155
1156         for (i = 0; i < chunk_list->size; i++) {
1157                 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1158                                  chunk_list->chunks[i].length, DMA_TO_DEVICE);
1159                 kfree(chunk_list->chunks[i].buf);
1160         }
1161
1162         kfree(chunk_list->chunks);
1163 }
1164
1165 /* Initialize PBL continuous mode: map the PBL buffer to a DMA address. */
1166 static int pbl_continuous_initialize(struct efa_dev *dev,
1167                                      struct pbl_context *pbl)
1168 {
1169         dma_addr_t dma_addr;
1170
1171         dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
1172                                   pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1173         if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1174                 ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
1175                 return -ENOMEM;
1176         }
1177
1178         pbl->phys.continuous.dma_addr = dma_addr;
1179         ibdev_dbg(&dev->ibdev,
1180                   "pbl continuous - dma_addr = %pad, size[%u]\n",
1181                   &dma_addr, pbl->pbl_buf_size_in_bytes);
1182
1183         return 0;
1184 }
1185
1186 /*
1187  * Initialize PBL indirect mode:
1188  * create a chunk list out of the DMA addresses of the physical pages of
1189  * the PBL buffer.
1190  */
1191 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
1192 {
1193         u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
1194         struct scatterlist *sgl;
1195         int sg_dma_cnt, err;
1196
1197         BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
1198         sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
1199         if (!sgl)
1200                 return -ENOMEM;
1201
1202         sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1203         if (!sg_dma_cnt) {
1204                 err = -EINVAL;
1205                 goto err_map;
1206         }
1207
1208         pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
1209         pbl->phys.indirect.sgl = sgl;
1210         pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
1211         err = pbl_chunk_list_create(dev, pbl);
1212         if (err) {
1213                 ibdev_dbg(&dev->ibdev,
1214                           "chunk_list creation failed[%d]\n", err);
1215                 goto err_chunk;
1216         }
1217
1218         ibdev_dbg(&dev->ibdev,
1219                   "pbl indirect - size[%u], chunks[%u]\n",
1220                   pbl->pbl_buf_size_in_bytes,
1221                   pbl->phys.indirect.chunk_list.size);
1222
1223         return 0;
1224
1225 err_chunk:
1226         dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1227 err_map:
1228         kfree(sgl);
1229         return err;
1230 }
1231
1232 static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
1233 {
1234         pbl_chunk_list_destroy(dev, pbl);
1235         dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
1236                      pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
1237         kfree(pbl->phys.indirect.sgl);
1238 }
1239
1240 /* create a page buffer list from a mapped user memory region */
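/*
 * The PBL buffer is allocated with kvzalloc(): if the allocation is
 * physically contiguous (kmalloc) it is DMA mapped as a single block
 * (continuous mode); if it fell back to vmalloc, its pages are gathered into
 * a scatterlist and described through an indirect chunk list.
 */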
1241 static int pbl_create(struct efa_dev *dev,
1242                       struct pbl_context *pbl,
1243                       struct ib_umem *umem,
1244                       int hp_cnt,
1245                       u8 hp_shift)
1246 {
1247         int err;
1248
1249         pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
1250         pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
1251         if (!pbl->pbl_buf)
1252                 return -ENOMEM;
1253
1254         if (is_vmalloc_addr(pbl->pbl_buf)) {
1255                 pbl->physically_continuous = 0;
1256                 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1257                                         hp_shift);
1258                 if (err)
1259                         goto err_free;
1260
1261                 err = pbl_indirect_initialize(dev, pbl);
1262                 if (err)
1263                         goto err_free;
1264         } else {
1265                 pbl->physically_continuous = 1;
1266                 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1267                                         hp_shift);
1268                 if (err)
1269                         goto err_free;
1270
1271                 err = pbl_continuous_initialize(dev, pbl);
1272                 if (err)
1273                         goto err_free;
1274         }
1275
1276         ibdev_dbg(&dev->ibdev,
1277                   "user_pbl_created: user_pages[%u], continuous[%u]\n",
1278                   hp_cnt, pbl->physically_continuous);
1279
1280         return 0;
1281
1282 err_free:
1283         kvfree(pbl->pbl_buf);
1284         return err;
1285 }
1286
1287 static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1288 {
1289         if (pbl->physically_continuous)
1290                 dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
1291                                  pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1292         else
1293                 pbl_indirect_terminate(dev, pbl);
1294
1295         kvfree(pbl->pbl_buf);
1296 }
1297
1298 static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
1299                                  struct efa_com_reg_mr_params *params)
1300 {
1301         int err;
1302
1303         params->inline_pbl = 1;
1304         err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
1305                                 params->page_num, params->page_shift);
1306         if (err)
1307                 return err;
1308
1309         ibdev_dbg(&dev->ibdev,
1310                   "inline_pbl_array - pages[%u]\n", params->page_num);
1311
1312         return 0;
1313 }
1314
1315 static int efa_create_pbl(struct efa_dev *dev,
1316                           struct pbl_context *pbl,
1317                           struct efa_mr *mr,
1318                           struct efa_com_reg_mr_params *params)
1319 {
1320         int err;
1321
1322         err = pbl_create(dev, pbl, mr->umem, params->page_num,
1323                          params->page_shift);
1324         if (err) {
1325                 ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
1326                 return err;
1327         }
1328
1329         params->inline_pbl = 0;
1330         params->indirect = !pbl->physically_continuous;
1331         if (pbl->physically_continuous) {
1332                 params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;
1333
1334                 efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
1335                                      &params->pbl.pbl.address.mem_addr_high,
1336                                      &params->pbl.pbl.address.mem_addr_low);
1337         } else {
1338                 params->pbl.pbl.length =
1339                         pbl->phys.indirect.chunk_list.chunks[0].length;
1340
1341                 efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
1342                                      &params->pbl.pbl.address.mem_addr_high,
1343                                      &params->pbl.pbl.address.mem_addr_low);
1344         }
1345
1346         return 0;
1347 }
1348
1349 struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
1350                          u64 virt_addr, int access_flags,
1351                          struct ib_udata *udata)
1352 {
1353         struct efa_dev *dev = to_edev(ibpd->device);
1354         struct efa_com_reg_mr_params params = {};
1355         struct efa_com_reg_mr_result result = {};
1356         struct pbl_context pbl;
1357         int supp_access_flags;
1358         unsigned int pg_sz;
1359         struct efa_mr *mr;
1360         int inline_size;
1361         int err;
1362
1363         if (udata && udata->inlen &&
1364             !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
1365                 ibdev_dbg(&dev->ibdev,
1366                           "Incompatible ABI params, udata not cleared\n");
1367                 err = -EINVAL;
1368                 goto err_out;
1369         }
1370
1371         supp_access_flags =
1372                 IB_ACCESS_LOCAL_WRITE |
1373                 (is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);
1374
1375         access_flags &= ~IB_ACCESS_OPTIONAL;
1376         if (access_flags & ~supp_access_flags) {
1377                 ibdev_dbg(&dev->ibdev,
1378                           "Unsupported access flags[%#x], supported[%#x]\n",
1379                           access_flags, supp_access_flags);
1380                 err = -EOPNOTSUPP;
1381                 goto err_out;
1382         }
1383
1384         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1385         if (!mr) {
1386                 err = -ENOMEM;
1387                 goto err_out;
1388         }
1389
1390         mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
1391         if (IS_ERR(mr->umem)) {
1392                 err = PTR_ERR(mr->umem);
1393                 ibdev_dbg(&dev->ibdev,
1394                           "Failed to pin and map user space memory[%d]\n", err);
1395                 goto err_free;
1396         }
1397
1398         params.pd = to_epd(ibpd)->pdn;
1399         params.iova = virt_addr;
1400         params.mr_length_in_bytes = length;
1401         params.permissions = access_flags;
1402
1403         pg_sz = ib_umem_find_best_pgsz(mr->umem,
1404                                        dev->dev_attr.page_size_cap,
1405                                        virt_addr);
1406         if (!pg_sz) {
1407                 err = -EOPNOTSUPP;
1408                 ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
1409                           dev->dev_attr.page_size_cap);
1410                 goto err_unmap;
1411         }
1412
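        /*
         * page_num counts device pages of pg_sz bytes, including the offset
         * of 'start' within its first page.
         */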
1413         params.page_shift = __ffs(pg_sz);
1414         params.page_num = DIV_ROUND_UP(length + (start & (pg_sz - 1)),
1415                                        pg_sz);
1416
1417         ibdev_dbg(&dev->ibdev,
1418                   "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
1419                   start, length, params.page_shift, params.page_num);
1420
1421         inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
1422         if (params.page_num <= inline_size) {
1423                 err = efa_create_inline_pbl(dev, mr, &params);
1424                 if (err)
1425                         goto err_unmap;
1426
1427                 err = efa_com_register_mr(&dev->edev, &params, &result);
1428                 if (err)
1429                         goto err_unmap;
1430         } else {
1431                 err = efa_create_pbl(dev, &pbl, mr, &params);
1432                 if (err)
1433                         goto err_unmap;
1434
1435                 err = efa_com_register_mr(&dev->edev, &params, &result);
1436                 pbl_destroy(dev, &pbl);
1437
1438                 if (err)
1439                         goto err_unmap;
1440         }
1441
1442         mr->ibmr.lkey = result.l_key;
1443         mr->ibmr.rkey = result.r_key;
1444         mr->ibmr.length = length;
1445         ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
1446
1447         return &mr->ibmr;
1448
1449 err_unmap:
1450         ib_umem_release(mr->umem);
1451 err_free:
1452         kfree(mr);
1453 err_out:
1454         atomic64_inc(&dev->stats.sw_stats.reg_mr_err);
1455         return ERR_PTR(err);
1456 }
1457
1458 int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1459 {
1460         struct efa_dev *dev = to_edev(ibmr->device);
1461         struct efa_com_dereg_mr_params params;
1462         struct efa_mr *mr = to_emr(ibmr);
1463         int err;
1464
1465         ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
1466
1467         params.l_key = mr->ibmr.lkey;
1468         err = efa_com_dereg_mr(&dev->edev, &params);
1469         if (err)
1470                 return err;
1471
1472         ib_umem_release(mr->umem);
1473         kfree(mr);
1474
1475         return 0;
1476 }
1477
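/*
 * Report the per-port attributes that never change over the device's
 * lifetime (P_Key and GID table sizes) so the IB core can cache them.
 */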
1478 int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
1479                            struct ib_port_immutable *immutable)
1480 {
1481         struct ib_port_attr attr;
1482         int err;
1483
1484         err = ib_query_port(ibdev, port_num, &attr);
1485         if (err) {
1486                 ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
1487                 return err;
1488         }
1489
1490         immutable->pkey_tbl_len = attr.pkey_tbl_len;
1491         immutable->gid_tbl_len = attr.gid_tbl_len;
1492
1493         return 0;
1494 }
1495
1496 static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
1497 {
1498         struct efa_com_dealloc_uar_params params = {
1499                 .uarn = uarn,
1500         };
1501
1502         return efa_com_dealloc_uar(&dev->edev, &params);
1503 }
1504
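/*
 * EFA_CHECK_USER_COMP() evaluates to NULL when the device does not expose
 * the attribute or when userspace acknowledged it via comp_mask; otherwise
 * it evaluates to the attribute name, which the caller logs as the reason
 * for failing the handshake. For example, with a non-zero max_tx_batch the
 * first check below reduces roughly to:
 *
 *	attr_str = (cmd->comp_mask & EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH) ?
 *		   NULL : "max_tx_batch";
 */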
1505 #define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \
1506         (_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \
1507                      NULL : #_attr)
1508
1509 static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext,
1510                                    const struct efa_ibv_alloc_ucontext_cmd *cmd)
1511 {
1512         struct efa_dev *dev = to_edev(ibucontext->device);
1513         char *attr_str;
1514
1515         if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch,
1516                                 EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str))
1517                 goto err;
1518
1519         if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth,
1520                                 EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR,
1521                                 attr_str))
1522                 goto err;
1523
1524         return 0;
1525
1526 err:
1527         ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
1528                   attr_str);
1529         return -EOPNOTSUPP;
1530 }
1531
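/*
 * Allocate a user context: validate the userspace compatibility handshake,
 * allocate a user access region (UAR) for the process on the device, and
 * report the device limits userspace needs in order to size its queues.
 */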
1532 int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
1533 {
1534         struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1535         struct efa_dev *dev = to_edev(ibucontext->device);
1536         struct efa_ibv_alloc_ucontext_resp resp = {};
1537         struct efa_ibv_alloc_ucontext_cmd cmd = {};
1538         struct efa_com_alloc_uar_result result;
1539         int err;
1540
1541         /*
1542          * It's fine if the driver does not know all request fields;
1543          * we will ack the input fields in our response.
1544          */
1545
1546         err = ib_copy_from_udata(&cmd, udata,
1547                                  min(sizeof(cmd), udata->inlen));
1548         if (err) {
1549                 ibdev_dbg(&dev->ibdev,
1550                           "Cannot copy udata for alloc_ucontext\n");
1551                 goto err_out;
1552         }
1553
1554         err = efa_user_comp_handshake(ibucontext, &cmd);
1555         if (err)
1556                 goto err_out;
1557
1558         err = efa_com_alloc_uar(&dev->edev, &result);
1559         if (err)
1560                 goto err_out;
1561
1562         ucontext->uarn = result.uarn;
1563
1564         resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
1565         resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
1566         resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
1567         resp.inline_buf_size = dev->dev_attr.inline_buf_size;
1568         resp.max_llq_size = dev->dev_attr.max_llq_size;
1569         resp.max_tx_batch = dev->dev_attr.max_tx_batch;
1570         resp.min_sq_wr = dev->dev_attr.min_sq_depth;
1571
1572         if (udata && udata->outlen) {
1573                 err = ib_copy_to_udata(udata, &resp,
1574                                        min(sizeof(resp), udata->outlen));
1575                 if (err)
1576                         goto err_dealloc_uar;
1577         }
1578
1579         return 0;
1580
1581 err_dealloc_uar:
1582         efa_dealloc_uar(dev, result.uarn);
1583 err_out:
1584         atomic64_inc(&dev->stats.sw_stats.alloc_ucontext_err);
1585         return err;
1586 }
1587
1588 void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
1589 {
1590         struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1591         struct efa_dev *dev = to_edev(ibucontext->device);
1592
1593         efa_dealloc_uar(dev, ucontext->uarn);
1594 }
1595
1596 void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
1597 {
1598         struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
1599
1600         kfree(entry);
1601 }
1602
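/*
 * mmap entries are registered by the verbs that create the underlying
 * resources (e.g. queue buffers and doorbell/LLQ BARs) and are looked up
 * here by the page offset userspace passes to mmap(); each entry records
 * the physical address and how it must be mapped.
 */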
1603 static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
1604                       struct vm_area_struct *vma)
1605 {
1606         struct rdma_user_mmap_entry *rdma_entry;
1607         struct efa_user_mmap_entry *entry;
1608         unsigned long va;
1609         int err = 0;
1610         u64 pfn;
1611
1612         rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
1613         if (!rdma_entry) {
1614                 ibdev_dbg(&dev->ibdev,
1615                           "pgoff[%#lx] does not have valid entry\n",
1616                           vma->vm_pgoff);
1617                 atomic64_inc(&dev->stats.sw_stats.mmap_err);
1618                 return -EINVAL;
1619         }
1620         entry = to_emmap(rdma_entry);
1621
1622         ibdev_dbg(&dev->ibdev,
1623                   "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
1624                   entry->address, rdma_entry->npages * PAGE_SIZE,
1625                   entry->mmap_flag);
1626
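        /*
         * DMA pages are kernel memory and are inserted page by page;
         * device BARs are remapped as I/O memory, either non-cached or
         * write-combined depending on how the entry was created.
         */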
1627         pfn = entry->address >> PAGE_SHIFT;
1628         switch (entry->mmap_flag) {
1629         case EFA_MMAP_IO_NC:
1630                 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1631                                         entry->rdma_entry.npages * PAGE_SIZE,
1632                                         pgprot_noncached(vma->vm_page_prot),
1633                                         rdma_entry);
1634                 break;
1635         case EFA_MMAP_IO_WC:
1636                 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1637                                         entry->rdma_entry.npages * PAGE_SIZE,
1638                                         pgprot_writecombine(vma->vm_page_prot),
1639                                         rdma_entry);
1640                 break;
1641         case EFA_MMAP_DMA_PAGE:
1642                 for (va = vma->vm_start; va < vma->vm_end;
1643                      va += PAGE_SIZE, pfn++) {
1644                         err = vm_insert_page(vma, va, pfn_to_page(pfn));
1645                         if (err)
1646                                 break;
1647                 }
1648                 break;
1649         default:
1650                 err = -EINVAL;
1651         }
1652
1653         if (err) {
1654                 ibdev_dbg(
1655                         &dev->ibdev,
1656                         "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
1657                         entry->address, rdma_entry->npages * PAGE_SIZE,
1658                         entry->mmap_flag, err);
1659                 atomic64_inc(&dev->stats.sw_stats.mmap_err);
1660         }
1661
1662         rdma_user_mmap_entry_put(rdma_entry);
1663         return err;
1664 }
1665
1666 int efa_mmap(struct ib_ucontext *ibucontext,
1667              struct vm_area_struct *vma)
1668 {
1669         struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1670         struct efa_dev *dev = to_edev(ibucontext->device);
1671         size_t length = vma->vm_end - vma->vm_start;
1672
1673         ibdev_dbg(&dev->ibdev,
1674                   "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
1675                   vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
1676
1677         return __efa_mmap(dev, ucontext, vma);
1678 }
1679
1680 static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
1681 {
1682         struct efa_com_destroy_ah_params params = {
1683                 .ah = ah->ah,
1684                 .pdn = to_epd(ah->ibah.pd)->pdn,
1685         };
1686
1687         return efa_com_destroy_ah(&dev->edev, &params);
1688 }
1689
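/*
 * Create an address handle: the destination GID is handed to the device,
 * which returns a compact AH number that userspace uses to address
 * subsequent sends. Creation issues an admin command, so it is only
 * permitted in a sleepable context.
 */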
1690 int efa_create_ah(struct ib_ah *ibah,
1691                   struct rdma_ah_init_attr *init_attr,
1692                   struct ib_udata *udata)
1693 {
1694         struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
1695         struct efa_dev *dev = to_edev(ibah->device);
1696         struct efa_com_create_ah_params params = {};
1697         struct efa_ibv_create_ah_resp resp = {};
1698         struct efa_com_create_ah_result result;
1699         struct efa_ah *ah = to_eah(ibah);
1700         int err;
1701
1702         if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
1703                 ibdev_dbg(&dev->ibdev,
1704                           "Create address handle is not supported in atomic context\n");
1705                 err = -EOPNOTSUPP;
1706                 goto err_out;
1707         }
1708
1709         if (udata->inlen &&
1710             !ib_is_udata_cleared(udata, 0, udata->inlen)) {
1711                 ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
1712                 err = -EINVAL;
1713                 goto err_out;
1714         }
1715
1716         memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
1717                sizeof(params.dest_addr));
1718         params.pdn = to_epd(ibah->pd)->pdn;
1719         err = efa_com_create_ah(&dev->edev, &params, &result);
1720         if (err)
1721                 goto err_out;
1722
1723         memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
1724         ah->ah = result.ah;
1725
1726         resp.efa_address_handle = result.ah;
1727
1728         if (udata->outlen) {
1729                 err = ib_copy_to_udata(udata, &resp,
1730                                        min(sizeof(resp), udata->outlen));
1731                 if (err) {
1732                         ibdev_dbg(&dev->ibdev,
1733                                   "Failed to copy udata for create_ah response\n");
1734                         goto err_destroy_ah;
1735                 }
1736         }
1737         ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);
1738
1739         return 0;
1740
1741 err_destroy_ah:
1742         efa_ah_destroy(dev, ah);
1743 err_out:
1744         atomic64_inc(&dev->stats.sw_stats.create_ah_err);
1745         return err;
1746 }
1747
1748 void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
1749 {
1750         struct efa_dev *dev = to_edev(ibah->pd->device);
1751         struct efa_ah *ah = to_eah(ibah);
1752
1753         ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);
1754
1755         if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
1756                 ibdev_dbg(&dev->ibdev,
1757                           "Destroy address handle is not supported in atomic context\n");
1758                 return;
1759         }
1760
1761         efa_ah_destroy(dev, ah);
1762 }
1763
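/*
 * HW stats plumbing: efa_alloc_hw_stats() sizes the rdma_hw_stats structure
 * from efa_stats_names, and efa_get_hw_stats() fills it from three sources:
 * basic traffic counters queried from the device, admin-queue command
 * counters, and the driver's software error counters.
 */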
1764 struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
1765 {
1766         return rdma_alloc_hw_stats_struct(efa_stats_names,
1767                                           ARRAY_SIZE(efa_stats_names),
1768                                           RDMA_HW_STATS_DEFAULT_LIFESPAN);
1769 }
1770
1771 int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1772                      u8 port_num, int index)
1773 {
1774         struct efa_com_get_stats_params params = {};
1775         union efa_com_get_stats_result result;
1776         struct efa_dev *dev = to_edev(ibdev);
1777         struct efa_com_basic_stats *bs;
1778         struct efa_com_stats_admin *as;
1779         struct efa_stats *s;
1780         int err;
1781
1782         params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
1783         params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
1784
1785         err = efa_com_get_stats(&dev->edev, &params, &result);
1786         if (err)
1787                 return err;
1788
1789         bs = &result.basic_stats;
1790         stats->value[EFA_TX_BYTES] = bs->tx_bytes;
1791         stats->value[EFA_TX_PKTS] = bs->tx_pkts;
1792         stats->value[EFA_RX_BYTES] = bs->rx_bytes;
1793         stats->value[EFA_RX_PKTS] = bs->rx_pkts;
1794         stats->value[EFA_RX_DROPS] = bs->rx_drops;
1795
1796         as = &dev->edev.aq.stats;
1797         stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
1798         stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
1799         stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
1800         stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
1801
1802         s = &dev->stats;
1803         stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
1804         stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
1805         stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
1806         stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->sw_stats.create_cq_err);
1807         stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
1808         stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
1809         stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);
1810         stats->value[EFA_MMAP_ERR] = atomic64_read(&s->sw_stats.mmap_err);
1811
1812         return ARRAY_SIZE(efa_stats_names);
1813 }
1814
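/*
 * EFA is neither InfiniBand nor RoCE over Ethernet, so the port's link
 * layer is reported as unspecified.
 */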
1815 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
1816                                          u8 port_num)
1817 {
1818         return IB_LINK_LAYER_UNSPECIFIED;
1819 }
1820