RDMA/efa: Do not delay freeing of DMA pages
drivers/infiniband/hw/efa/efa_verbs.c
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
4  */
5
6 #include <linux/vmalloc.h>
7
8 #include <rdma/ib_addr.h>
9 #include <rdma/ib_umem.h>
10 #include <rdma/ib_user_verbs.h>
11 #include <rdma/ib_verbs.h>
12 #include <rdma/uverbs_ioctl.h>
13
14 #include "efa.h"
15
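/*
 * Mmap "types" stored in each efa_user_mmap_entry: EFA_MMAP_DMA_PAGE marks
 * kernel-allocated queue memory handed to userspace, while EFA_MMAP_IO_WC
 * and EFA_MMAP_IO_NC mark device BAR regions mapped with write-combined or
 * non-cached attributes, respectively.
 */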
16 enum {
17         EFA_MMAP_DMA_PAGE = 0,
18         EFA_MMAP_IO_WC,
19         EFA_MMAP_IO_NC,
20 };
21
22 #define EFA_AENQ_ENABLED_GROUPS \
23         (BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
24          BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
25
26 struct efa_user_mmap_entry {
27         struct rdma_user_mmap_entry rdma_entry;
28         u64 address;
29         u8 mmap_flag;
30 };
31
32 #define EFA_DEFINE_STATS(op) \
33         op(EFA_TX_BYTES, "tx_bytes") \
34         op(EFA_TX_PKTS, "tx_pkts") \
35         op(EFA_RX_BYTES, "rx_bytes") \
36         op(EFA_RX_PKTS, "rx_pkts") \
37         op(EFA_RX_DROPS, "rx_drops") \
38         op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
39         op(EFA_COMPLETED_CMDS, "completed_cmds") \
40         op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
41         op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
42         op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
43         op(EFA_CREATE_QP_ERR, "create_qp_err") \
44         op(EFA_REG_MR_ERR, "reg_mr_err") \
45         op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
46         op(EFA_CREATE_AH_ERR, "create_ah_err")
47
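/*
 * X-macro helpers: expanding EFA_DEFINE_STATS() with EFA_STATS_ENUM produces
 * the efa_hw_stats enumerators, and expanding it with EFA_STATS_STR produces
 * the matching name strings, so both lists stay in sync from one definition.
 */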
48 #define EFA_STATS_ENUM(ename, name) ename,
49 #define EFA_STATS_STR(ename, name) [ename] = name,
50
51 enum efa_hw_stats {
52         EFA_DEFINE_STATS(EFA_STATS_ENUM)
53 };
54
55 static const char *const efa_stats_names[] = {
56         EFA_DEFINE_STATS(EFA_STATS_STR)
57 };
58
59 #define EFA_CHUNK_PAYLOAD_SHIFT       12
60 #define EFA_CHUNK_PAYLOAD_SIZE        BIT(EFA_CHUNK_PAYLOAD_SHIFT)
61 #define EFA_CHUNK_PAYLOAD_PTR_SIZE    8
62
63 #define EFA_CHUNK_SHIFT               12
64 #define EFA_CHUNK_SIZE                BIT(EFA_CHUNK_SHIFT)
65 #define EFA_CHUNK_PTR_SIZE            sizeof(struct efa_com_ctrl_buff_info)
66
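/*
 * Each EFA_CHUNK_SIZE (4KB) chunk holds EFA_PTRS_PER_CHUNK page addresses
 * followed by a single efa_com_ctrl_buff_info that links to the next chunk;
 * see pbl_chunk_list_create() below.
 */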
67 #define EFA_PTRS_PER_CHUNK \
68         ((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)
69
70 #define EFA_CHUNK_USED_SIZE \
71         ((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
72
73 struct pbl_chunk {
74         dma_addr_t dma_addr;
75         u64 *buf;
76         u32 length;
77 };
78
79 struct pbl_chunk_list {
80         struct pbl_chunk *chunks;
81         unsigned int size;
82 };
83
84 struct pbl_context {
85         union {
86                 struct {
87                         dma_addr_t dma_addr;
88                 } continuous;
89                 struct {
90                         u32 pbl_buf_size_in_pages;
91                         struct scatterlist *sgl;
92                         int sg_dma_cnt;
93                         struct pbl_chunk_list chunk_list;
94                 } indirect;
95         } phys;
96         u64 *pbl_buf;
97         u32 pbl_buf_size_in_bytes;
98         u8 physically_continuous;
99 };
100
101 static inline struct efa_dev *to_edev(struct ib_device *ibdev)
102 {
103         return container_of(ibdev, struct efa_dev, ibdev);
104 }
105
106 static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
107 {
108         return container_of(ibucontext, struct efa_ucontext, ibucontext);
109 }
110
111 static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
112 {
113         return container_of(ibpd, struct efa_pd, ibpd);
114 }
115
116 static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
117 {
118         return container_of(ibmr, struct efa_mr, ibmr);
119 }
120
121 static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
122 {
123         return container_of(ibqp, struct efa_qp, ibqp);
124 }
125
126 static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
127 {
128         return container_of(ibcq, struct efa_cq, ibcq);
129 }
130
131 static inline struct efa_ah *to_eah(struct ib_ah *ibah)
132 {
133         return container_of(ibah, struct efa_ah, ibah);
134 }
135
136 static inline struct efa_user_mmap_entry *
137 to_emmap(struct rdma_user_mmap_entry *rdma_entry)
138 {
139         return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
140 }
141
142 static inline bool is_rdma_read_cap(struct efa_dev *dev)
143 {
144         return dev->dev_attr.device_caps & EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK;
145 }
146
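/*
 * field_avail() is true when a udata blob of 'sz' bytes is large enough to
 * contain field 'fld' of structure type 'x'; it is used to reject callers
 * built against an older ABI that lacks a required field.
 */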
147 #define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
148                                  sizeof_field(typeof(x), fld) <= (sz))
149
150 #define is_reserved_cleared(reserved) \
151         !memchr_inv(reserved, 0, sizeof(reserved))
152
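/*
 * Allocate a zeroed, physically contiguous buffer and stream-map it for DMA.
 * The buffer is page backed (alloc_pages_exact()) so it can also be exposed
 * to userspace through an EFA_MMAP_DMA_PAGE mmap entry.
 */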
153 static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
154                                size_t size, enum dma_data_direction dir)
155 {
156         void *addr;
157
158         addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
159         if (!addr)
160                 return NULL;
161
162         *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
163         if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
164                 ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
165                 free_pages_exact(addr, size);
166                 return NULL;
167         }
168
169         return addr;
170 }
171
172 static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
173                             dma_addr_t dma_addr,
174                             size_t size, enum dma_data_direction dir)
175 {
176         dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
177         free_pages_exact(cpu_addr, size);
178 }
179
180 int efa_query_device(struct ib_device *ibdev,
181                      struct ib_device_attr *props,
182                      struct ib_udata *udata)
183 {
184         struct efa_com_get_device_attr_result *dev_attr;
185         struct efa_ibv_ex_query_device_resp resp = {};
186         struct efa_dev *dev = to_edev(ibdev);
187         int err;
188
189         if (udata && udata->inlen &&
190             !ib_is_udata_cleared(udata, 0, udata->inlen)) {
191                 ibdev_dbg(ibdev,
192                           "Incompatible ABI params, udata not cleared\n");
193                 return -EINVAL;
194         }
195
196         dev_attr = &dev->dev_attr;
197
198         memset(props, 0, sizeof(*props));
199         props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
200         props->page_size_cap = dev_attr->page_size_cap;
201         props->vendor_id = dev->pdev->vendor;
202         props->vendor_part_id = dev->pdev->device;
203         props->hw_ver = dev->pdev->subsystem_device;
204         props->max_qp = dev_attr->max_qp;
205         props->max_cq = dev_attr->max_cq;
206         props->max_pd = dev_attr->max_pd;
207         props->max_mr = dev_attr->max_mr;
208         props->max_ah = dev_attr->max_ah;
209         props->max_cqe = dev_attr->max_cq_depth;
210         props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
211                                  dev_attr->max_rq_depth);
212         props->max_send_sge = dev_attr->max_sq_sge;
213         props->max_recv_sge = dev_attr->max_rq_sge;
214         props->max_sge_rd = dev_attr->max_wr_rdma_sge;
215
216         if (udata && udata->outlen) {
217                 resp.max_sq_sge = dev_attr->max_sq_sge;
218                 resp.max_rq_sge = dev_attr->max_rq_sge;
219                 resp.max_sq_wr = dev_attr->max_sq_depth;
220                 resp.max_rq_wr = dev_attr->max_rq_depth;
221                 resp.max_rdma_size = dev_attr->max_rdma_size;
222
223                 if (is_rdma_read_cap(dev))
224                         resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
225
226                 err = ib_copy_to_udata(udata, &resp,
227                                        min(sizeof(resp), udata->outlen));
228                 if (err) {
229                         ibdev_dbg(ibdev,
230                                   "Failed to copy udata for query_device\n");
231                         return err;
232                 }
233         }
234
235         return 0;
236 }
237
238 int efa_query_port(struct ib_device *ibdev, u8 port,
239                    struct ib_port_attr *props)
240 {
241         struct efa_dev *dev = to_edev(ibdev);
242
243         props->lmc = 1;
244
245         props->state = IB_PORT_ACTIVE;
246         props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
247         props->gid_tbl_len = 1;
248         props->pkey_tbl_len = 1;
249         props->active_speed = IB_SPEED_EDR;
250         props->active_width = IB_WIDTH_4X;
251         props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
252         props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
253         props->max_msg_sz = dev->dev_attr.mtu;
254         props->max_vl_num = 1;
255
256         return 0;
257 }
258
259 int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
260                  int qp_attr_mask,
261                  struct ib_qp_init_attr *qp_init_attr)
262 {
263         struct efa_dev *dev = to_edev(ibqp->device);
264         struct efa_com_query_qp_params params = {};
265         struct efa_com_query_qp_result result;
266         struct efa_qp *qp = to_eqp(ibqp);
267         int err;
268
269 #define EFA_QUERY_QP_SUPP_MASK \
270         (IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
271          IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP)
272
273         if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
274                 ibdev_dbg(&dev->ibdev,
275                           "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
276                           qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
277                 return -EOPNOTSUPP;
278         }
279
280         memset(qp_attr, 0, sizeof(*qp_attr));
281         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
282
283         params.qp_handle = qp->qp_handle;
284         err = efa_com_query_qp(&dev->edev, &params, &result);
285         if (err)
286                 return err;
287
288         qp_attr->qp_state = result.qp_state;
289         qp_attr->qkey = result.qkey;
290         qp_attr->sq_psn = result.sq_psn;
291         qp_attr->sq_draining = result.sq_draining;
292         qp_attr->port_num = 1;
293
294         qp_attr->cap.max_send_wr = qp->max_send_wr;
295         qp_attr->cap.max_recv_wr = qp->max_recv_wr;
296         qp_attr->cap.max_send_sge = qp->max_send_sge;
297         qp_attr->cap.max_recv_sge = qp->max_recv_sge;
298         qp_attr->cap.max_inline_data = qp->max_inline_data;
299
300         qp_init_attr->qp_type = ibqp->qp_type;
301         qp_init_attr->recv_cq = ibqp->recv_cq;
302         qp_init_attr->send_cq = ibqp->send_cq;
303         qp_init_attr->qp_context = ibqp->qp_context;
304         qp_init_attr->cap = qp_attr->cap;
305
306         return 0;
307 }
308
309 int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
310                   union ib_gid *gid)
311 {
312         struct efa_dev *dev = to_edev(ibdev);
313
314         memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
315
316         return 0;
317 }
318
319 int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
320                    u16 *pkey)
321 {
322         if (index > 0)
323                 return -EINVAL;
324
325         *pkey = 0xffff;
326         return 0;
327 }
328
329 static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
330 {
331         struct efa_com_dealloc_pd_params params = {
332                 .pdn = pdn,
333         };
334
335         return efa_com_dealloc_pd(&dev->edev, &params);
336 }
337
338 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
339 {
340         struct efa_dev *dev = to_edev(ibpd->device);
341         struct efa_ibv_alloc_pd_resp resp = {};
342         struct efa_com_alloc_pd_result result;
343         struct efa_pd *pd = to_epd(ibpd);
344         int err;
345
346         if (udata->inlen &&
347             !ib_is_udata_cleared(udata, 0, udata->inlen)) {
348                 ibdev_dbg(&dev->ibdev,
349                           "Incompatible ABI params, udata not cleared\n");
350                 err = -EINVAL;
351                 goto err_out;
352         }
353
354         err = efa_com_alloc_pd(&dev->edev, &result);
355         if (err)
356                 goto err_out;
357
358         pd->pdn = result.pdn;
359         resp.pdn = result.pdn;
360
361         if (udata->outlen) {
362                 err = ib_copy_to_udata(udata, &resp,
363                                        min(sizeof(resp), udata->outlen));
364                 if (err) {
365                         ibdev_dbg(&dev->ibdev,
366                                   "Failed to copy udata for alloc_pd\n");
367                         goto err_dealloc_pd;
368                 }
369         }
370
371         ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);
372
373         return 0;
374
375 err_dealloc_pd:
376         efa_pd_dealloc(dev, result.pdn);
377 err_out:
378         atomic64_inc(&dev->stats.sw_stats.alloc_pd_err);
379         return err;
380 }
381
382 void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
383 {
384         struct efa_dev *dev = to_edev(ibpd->device);
385         struct efa_pd *pd = to_epd(ibpd);
386
387         ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
388         efa_pd_dealloc(dev, pd->pdn);
389 }
390
391 static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
392 {
393         struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };
394
395         return efa_com_destroy_qp(&dev->edev, &params);
396 }
397
398 static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
399 {
400         rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
401         rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
402         rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
403         rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
404 }
405
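/*
 * QP teardown: remove the user mmap entries, destroy the QP in the device,
 * then unmap and free the RQ DMA buffer immediately rather than deferring it
 * to the mmap_free callback (see the commit title, "RDMA/efa: Do not delay
 * freeing of DMA pages").
 */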
406 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
407 {
408         struct efa_dev *dev = to_edev(ibqp->pd->device);
409         struct efa_qp *qp = to_eqp(ibqp);
410         int err;
411
412         ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
413
414         efa_qp_user_mmap_entries_remove(qp);
415
416         err = efa_destroy_qp_handle(dev, qp->qp_handle);
417         if (err)
418                 return err;
419
420         if (qp->rq_cpu_addr) {
421                 ibdev_dbg(&dev->ibdev,
422                           "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
423                           qp->rq_cpu_addr, qp->rq_size,
424                           &qp->rq_dma_addr);
425                 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
426                                 qp->rq_size, DMA_TO_DEVICE);
427         }
428
429         kfree(qp);
430         return 0;
431 }
432
433 static struct rdma_user_mmap_entry*
434 efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
435                            u64 address, size_t length,
436                            u8 mmap_flag, u64 *offset)
437 {
438         struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
439         int err;
440
441         if (!entry)
442                 return NULL;
443
444         entry->address = address;
445         entry->mmap_flag = mmap_flag;
446
447         err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
448                                           length);
449         if (err) {
450                 kfree(entry);
451                 return NULL;
452         }
453         *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
454
455         return &entry->rdma_entry;
456 }
457
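/*
 * Register the mmap entries a newly created QP exposes to userspace: the
 * SQ/RQ doorbells (non-cached I/O), the LLQ descriptor ring (write-combined
 * I/O) and, when a receive queue exists, the kernel-allocated RQ buffer
 * itself. The resulting keys and offsets are reported in the create_qp
 * response.
 */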
458 static int qp_mmap_entries_setup(struct efa_qp *qp,
459                                  struct efa_dev *dev,
460                                  struct efa_ucontext *ucontext,
461                                  struct efa_com_create_qp_params *params,
462                                  struct efa_ibv_create_qp_resp *resp)
463 {
464         size_t length;
465         u64 address;
466
467         address = dev->db_bar_addr + resp->sq_db_offset;
468         qp->sq_db_mmap_entry =
469                 efa_user_mmap_entry_insert(&ucontext->ibucontext,
470                                            address,
471                                            PAGE_SIZE, EFA_MMAP_IO_NC,
472                                            &resp->sq_db_mmap_key);
473         if (!qp->sq_db_mmap_entry)
474                 return -ENOMEM;
475
476         resp->sq_db_offset &= ~PAGE_MASK;
477
478         address = dev->mem_bar_addr + resp->llq_desc_offset;
479         length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
480                             (resp->llq_desc_offset & ~PAGE_MASK));
481
482         qp->llq_desc_mmap_entry =
483                 efa_user_mmap_entry_insert(&ucontext->ibucontext,
484                                            address, length,
485                                            EFA_MMAP_IO_WC,
486                                            &resp->llq_desc_mmap_key);
487         if (!qp->llq_desc_mmap_entry)
488                 goto err_remove_mmap;
489
490         resp->llq_desc_offset &= ~PAGE_MASK;
491
492         if (qp->rq_size) {
493                 address = dev->db_bar_addr + resp->rq_db_offset;
494
495                 qp->rq_db_mmap_entry =
496                         efa_user_mmap_entry_insert(&ucontext->ibucontext,
497                                                    address, PAGE_SIZE,
498                                                    EFA_MMAP_IO_NC,
499                                                    &resp->rq_db_mmap_key);
500                 if (!qp->rq_db_mmap_entry)
501                         goto err_remove_mmap;
502
503                 resp->rq_db_offset &= ~PAGE_MASK;
504
505                 address = virt_to_phys(qp->rq_cpu_addr);
506                 qp->rq_mmap_entry =
507                         efa_user_mmap_entry_insert(&ucontext->ibucontext,
508                                                    address, qp->rq_size,
509                                                    EFA_MMAP_DMA_PAGE,
510                                                    &resp->rq_mmap_key);
511                 if (!qp->rq_mmap_entry)
512                         goto err_remove_mmap;
513
514                 resp->rq_mmap_size = qp->rq_size;
515         }
516
517         return 0;
518
519 err_remove_mmap:
520         efa_qp_user_mmap_entries_remove(qp);
521
522         return -ENOMEM;
523 }
524
525 static int efa_qp_validate_cap(struct efa_dev *dev,
526                                struct ib_qp_init_attr *init_attr)
527 {
528         if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
529                 ibdev_dbg(&dev->ibdev,
530                           "qp: requested send wr[%u] exceeds the max[%u]\n",
531                           init_attr->cap.max_send_wr,
532                           dev->dev_attr.max_sq_depth);
533                 return -EINVAL;
534         }
535         if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
536                 ibdev_dbg(&dev->ibdev,
537                           "qp: requested receive wr[%u] exceeds the max[%u]\n",
538                           init_attr->cap.max_recv_wr,
539                           dev->dev_attr.max_rq_depth);
540                 return -EINVAL;
541         }
542         if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
543                 ibdev_dbg(&dev->ibdev,
544                           "qp: requested sge send[%u] exceeds the max[%u]\n",
545                           init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
546                 return -EINVAL;
547         }
548         if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
549                 ibdev_dbg(&dev->ibdev,
550                           "qp: requested sge recv[%u] exceeds the max[%u]\n",
551                           init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
552                 return -EINVAL;
553         }
554         if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
555                 ibdev_dbg(&dev->ibdev,
556                           "qp: requested inline data[%u] exceeds the max[%u]\n",
557                           init_attr->cap.max_inline_data,
558                           dev->dev_attr.inline_buf_size);
559                 return -EINVAL;
560         }
561
562         return 0;
563 }
564
565 static int efa_qp_validate_attr(struct efa_dev *dev,
566                                 struct ib_qp_init_attr *init_attr)
567 {
568         if (init_attr->qp_type != IB_QPT_DRIVER &&
569             init_attr->qp_type != IB_QPT_UD) {
570                 ibdev_dbg(&dev->ibdev,
571                           "Unsupported qp type %d\n", init_attr->qp_type);
572                 return -EOPNOTSUPP;
573         }
574
575         if (init_attr->srq) {
576                 ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
577                 return -EOPNOTSUPP;
578         }
579
580         if (init_attr->create_flags) {
581                 ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
582                 return -EOPNOTSUPP;
583         }
584
585         return 0;
586 }
587
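/*
 * QP creation: validate the requested capabilities and the user ABI,
 * allocate the kernel RQ buffer (DMA-mapped towards the device and later
 * mmapped by userspace), issue the admin create-QP command, register the
 * mmap entries and copy the response back to userspace.
 */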
588 struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
589                             struct ib_qp_init_attr *init_attr,
590                             struct ib_udata *udata)
591 {
592         struct efa_com_create_qp_params create_qp_params = {};
593         struct efa_com_create_qp_result create_qp_resp;
594         struct efa_dev *dev = to_edev(ibpd->device);
595         struct efa_ibv_create_qp_resp resp = {};
596         struct efa_ibv_create_qp cmd = {};
597         struct efa_ucontext *ucontext;
598         struct efa_qp *qp;
599         int err;
600
601         ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
602                                              ibucontext);
603
604         err = efa_qp_validate_cap(dev, init_attr);
605         if (err)
606                 goto err_out;
607
608         err = efa_qp_validate_attr(dev, init_attr);
609         if (err)
610                 goto err_out;
611
612         if (!field_avail(cmd, driver_qp_type, udata->inlen)) {
613                 ibdev_dbg(&dev->ibdev,
614                           "Incompatible ABI params, no input udata\n");
615                 err = -EINVAL;
616                 goto err_out;
617         }
618
619         if (udata->inlen > sizeof(cmd) &&
620             !ib_is_udata_cleared(udata, sizeof(cmd),
621                                  udata->inlen - sizeof(cmd))) {
622                 ibdev_dbg(&dev->ibdev,
623                           "Incompatible ABI params, unknown fields in udata\n");
624                 err = -EINVAL;
625                 goto err_out;
626         }
627
628         err = ib_copy_from_udata(&cmd, udata,
629                                  min(sizeof(cmd), udata->inlen));
630         if (err) {
631                 ibdev_dbg(&dev->ibdev,
632                           "Cannot copy udata for create_qp\n");
633                 goto err_out;
634         }
635
636         if (cmd.comp_mask) {
637                 ibdev_dbg(&dev->ibdev,
638                           "Incompatible ABI params, unknown fields in udata\n");
639                 err = -EINVAL;
640                 goto err_out;
641         }
642
643         qp = kzalloc(sizeof(*qp), GFP_KERNEL);
644         if (!qp) {
645                 err = -ENOMEM;
646                 goto err_out;
647         }
648
649         create_qp_params.uarn = ucontext->uarn;
650         create_qp_params.pd = to_epd(ibpd)->pdn;
651
652         if (init_attr->qp_type == IB_QPT_UD) {
653                 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
654         } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
655                 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
656         } else {
657                 ibdev_dbg(&dev->ibdev,
658                           "Unsupported qp type %d driver qp type %d\n",
659                           init_attr->qp_type, cmd.driver_qp_type);
660                 err = -EOPNOTSUPP;
661                 goto err_free_qp;
662         }
663
664         ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
665                   init_attr->qp_type, cmd.driver_qp_type);
666         create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
667         create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
668         create_qp_params.sq_depth = init_attr->cap.max_send_wr;
669         create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;
670
671         create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
672         create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
673         qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
674         if (qp->rq_size) {
675                 qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
676                                                     qp->rq_size, DMA_TO_DEVICE);
677                 if (!qp->rq_cpu_addr) {
678                         err = -ENOMEM;
679                         goto err_free_qp;
680                 }
681
682                 ibdev_dbg(&dev->ibdev,
683                           "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
684                           qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
685                 create_qp_params.rq_base_addr = qp->rq_dma_addr;
686         }
687
688         err = efa_com_create_qp(&dev->edev, &create_qp_params,
689                                 &create_qp_resp);
690         if (err)
691                 goto err_free_mapped;
692
693         resp.sq_db_offset = create_qp_resp.sq_db_offset;
694         resp.rq_db_offset = create_qp_resp.rq_db_offset;
695         resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
696         resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
697         resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;
698
699         err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
700                                     &resp);
701         if (err)
702                 goto err_destroy_qp;
703
704         qp->qp_handle = create_qp_resp.qp_handle;
705         qp->ibqp.qp_num = create_qp_resp.qp_num;
706         qp->ibqp.qp_type = init_attr->qp_type;
707         qp->max_send_wr = init_attr->cap.max_send_wr;
708         qp->max_recv_wr = init_attr->cap.max_recv_wr;
709         qp->max_send_sge = init_attr->cap.max_send_sge;
710         qp->max_recv_sge = init_attr->cap.max_recv_sge;
711         qp->max_inline_data = init_attr->cap.max_inline_data;
712
713         if (udata->outlen) {
714                 err = ib_copy_to_udata(udata, &resp,
715                                        min(sizeof(resp), udata->outlen));
716                 if (err) {
717                         ibdev_dbg(&dev->ibdev,
718                                   "Failed to copy udata for qp[%u]\n",
719                                   create_qp_resp.qp_num);
720                         goto err_remove_mmap_entries;
721                 }
722         }
723
724         ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
725
726         return &qp->ibqp;
727
728 err_remove_mmap_entries:
729         efa_qp_user_mmap_entries_remove(qp);
730 err_destroy_qp:
731         efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
732 err_free_mapped:
733         if (qp->rq_size)
734                 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
735                                 qp->rq_size, DMA_TO_DEVICE);
736 err_free_qp:
737         kfree(qp);
738 err_out:
739         atomic64_inc(&dev->stats.sw_stats.create_qp_err);
740         return ERR_PTR(err);
741 }
742
743 static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
744                                   struct ib_qp_attr *qp_attr, int qp_attr_mask,
745                                   enum ib_qp_state cur_state,
746                                   enum ib_qp_state new_state)
747 {
748 #define EFA_MODIFY_QP_SUPP_MASK \
749         (IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
750          IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN)
751
752         if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
753                 ibdev_dbg(&dev->ibdev,
754                           "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
755                           qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
756                 return -EOPNOTSUPP;
757         }
758
759         if (!ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
760                                 qp_attr_mask)) {
761                 ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
762                 return -EINVAL;
763         }
764
765         if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
766                 ibdev_dbg(&dev->ibdev, "Can't change port num\n");
767                 return -EOPNOTSUPP;
768         }
769
770         if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
771                 ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
772                 return -EOPNOTSUPP;
773         }
774
775         return 0;
776 }
777
778 int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
779                   int qp_attr_mask, struct ib_udata *udata)
780 {
781         struct efa_dev *dev = to_edev(ibqp->device);
782         struct efa_com_modify_qp_params params = {};
783         struct efa_qp *qp = to_eqp(ibqp);
784         enum ib_qp_state cur_state;
785         enum ib_qp_state new_state;
786         int err;
787
788         if (udata->inlen &&
789             !ib_is_udata_cleared(udata, 0, udata->inlen)) {
790                 ibdev_dbg(&dev->ibdev,
791                           "Incompatible ABI params, udata not cleared\n");
792                 return -EINVAL;
793         }
794
795         cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
796                                                      qp->state;
797         new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
798
799         err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
800                                      new_state);
801         if (err)
802                 return err;
803
804         params.qp_handle = qp->qp_handle;
805
806         if (qp_attr_mask & IB_QP_STATE) {
807                 params.modify_mask |= BIT(EFA_ADMIN_QP_STATE_BIT) |
808                                       BIT(EFA_ADMIN_CUR_QP_STATE_BIT);
809                 params.cur_qp_state = qp_attr->cur_qp_state;
810                 params.qp_state = qp_attr->qp_state;
811         }
812
813         if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
814                 params.modify_mask |=
815                         BIT(EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT);
816                 params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
817         }
818
819         if (qp_attr_mask & IB_QP_QKEY) {
820                 params.modify_mask |= BIT(EFA_ADMIN_QKEY_BIT);
821                 params.qkey = qp_attr->qkey;
822         }
823
824         if (qp_attr_mask & IB_QP_SQ_PSN) {
825                 params.modify_mask |= BIT(EFA_ADMIN_SQ_PSN_BIT);
826                 params.sq_psn = qp_attr->sq_psn;
827         }
828
829         err = efa_com_modify_qp(&dev->edev, &params);
830         if (err)
831                 return err;
832
833         qp->state = new_state;
834
835         return 0;
836 }
837
838 static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
839 {
840         struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
841
842         return efa_com_destroy_cq(&dev->edev, &params);
843 }
844
845 void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
846 {
847         struct efa_dev *dev = to_edev(ibcq->device);
848         struct efa_cq *cq = to_ecq(ibcq);
849
850         ibdev_dbg(&dev->ibdev,
851                   "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
852                   cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
853
854         rdma_user_mmap_entry_remove(cq->mmap_entry);
855         efa_destroy_cq_idx(dev, cq->cq_idx);
856         efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
857                         DMA_FROM_DEVICE);
858 }
859
860 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
861                                  struct efa_ibv_create_cq_resp *resp)
862 {
863         resp->q_mmap_size = cq->size;
864         cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
865                                                     virt_to_phys(cq->cpu_addr),
866                                                     cq->size, EFA_MMAP_DMA_PAGE,
867                                                     &resp->q_mmap_key);
868         if (!cq->mmap_entry)
869                 return -ENOMEM;
870
871         return 0;
872 }
873
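/*
 * CQ creation: validate the user ABI, allocate the CQ ring with
 * efa_zalloc_mapped(), create the CQ through the admin queue and expose the
 * ring to userspace via an EFA_MMAP_DMA_PAGE entry whose key is returned in
 * the response.
 */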
874 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
875                   struct ib_udata *udata)
876 {
877         struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
878                 udata, struct efa_ucontext, ibucontext);
879         struct efa_ibv_create_cq_resp resp = {};
880         struct efa_com_create_cq_params params;
881         struct efa_com_create_cq_result result;
882         struct ib_device *ibdev = ibcq->device;
883         struct efa_dev *dev = to_edev(ibdev);
884         struct efa_ibv_create_cq cmd = {};
885         struct efa_cq *cq = to_ecq(ibcq);
886         int entries = attr->cqe;
887         int err;
888
889         ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
890
891         if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
892                 ibdev_dbg(ibdev,
893                           "cq: requested entries[%u] non-positive or greater than max[%u]\n",
894                           entries, dev->dev_attr.max_cq_depth);
895                 err = -EINVAL;
896                 goto err_out;
897         }
898
899         if (!field_avail(cmd, num_sub_cqs, udata->inlen)) {
900                 ibdev_dbg(ibdev,
901                           "Incompatible ABI params, no input udata\n");
902                 err = -EINVAL;
903                 goto err_out;
904         }
905
906         if (udata->inlen > sizeof(cmd) &&
907             !ib_is_udata_cleared(udata, sizeof(cmd),
908                                  udata->inlen - sizeof(cmd))) {
909                 ibdev_dbg(ibdev,
910                           "Incompatible ABI params, unknown fields in udata\n");
911                 err = -EINVAL;
912                 goto err_out;
913         }
914
915         err = ib_copy_from_udata(&cmd, udata,
916                                  min(sizeof(cmd), udata->inlen));
917         if (err) {
918                 ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
919                 goto err_out;
920         }
921
922         if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
923                 ibdev_dbg(ibdev,
924                           "Incompatible ABI params, unknown fields in udata\n");
925                 err = -EINVAL;
926                 goto err_out;
927         }
928
929         if (!cmd.cq_entry_size) {
930                 ibdev_dbg(ibdev,
931                           "Invalid entry size [%u]\n", cmd.cq_entry_size);
932                 err = -EINVAL;
933                 goto err_out;
934         }
935
936         if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
937                 ibdev_dbg(ibdev,
938                           "Invalid number of sub cqs[%u] expected[%u]\n",
939                           cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
940                 err = -EINVAL;
941                 goto err_out;
942         }
943
944         cq->ucontext = ucontext;
945         cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
946         cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
947                                          DMA_FROM_DEVICE);
948         if (!cq->cpu_addr) {
949                 err = -ENOMEM;
950                 goto err_out;
951         }
952
953         params.uarn = cq->ucontext->uarn;
954         params.cq_depth = entries;
955         params.dma_addr = cq->dma_addr;
956         params.entry_size_in_bytes = cmd.cq_entry_size;
957         params.num_sub_cqs = cmd.num_sub_cqs;
958         err = efa_com_create_cq(&dev->edev, &params, &result);
959         if (err)
960                 goto err_free_mapped;
961
962         resp.cq_idx = result.cq_idx;
963         cq->cq_idx = result.cq_idx;
964         cq->ibcq.cqe = result.actual_depth;
965         WARN_ON_ONCE(entries != result.actual_depth);
966
967         err = cq_mmap_entries_setup(dev, cq, &resp);
968         if (err) {
969                 ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
970                           cq->cq_idx);
971                 goto err_destroy_cq;
972         }
973
974         if (udata->outlen) {
975                 err = ib_copy_to_udata(udata, &resp,
976                                        min(sizeof(resp), udata->outlen));
977                 if (err) {
978                         ibdev_dbg(ibdev,
979                                   "Failed to copy udata for create_cq\n");
980                         goto err_remove_mmap;
981                 }
982         }
983
984         ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
985                   cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
986
987         return 0;
988
989 err_remove_mmap:
990         rdma_user_mmap_entry_remove(cq->mmap_entry);
991 err_destroy_cq:
992         efa_destroy_cq_idx(dev, cq->cq_idx);
993 err_free_mapped:
994         efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
995                         DMA_FROM_DEVICE);
996
997 err_out:
998         atomic64_inc(&dev->stats.sw_stats.create_cq_err);
999         return err;
1000 }
1001
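/*
 * Walk the umem in blocks of BIT(hp_shift) bytes and record each block's
 * DMA address in page_list; the caller sized page_list for hp_cnt entries.
 */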
1002 static int umem_to_page_list(struct efa_dev *dev,
1003                              struct ib_umem *umem,
1004                              u64 *page_list,
1005                              u32 hp_cnt,
1006                              u8 hp_shift)
1007 {
1008         u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
1009         struct ib_block_iter biter;
1010         unsigned int hp_idx = 0;
1011
1012         ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
1013                   hp_cnt, pages_in_hp);
1014
1015         rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
1016                             BIT(hp_shift))
1017                 page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
1018
1019         return 0;
1020 }
1021
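/*
 * Build a scatterlist over the pages backing a vmalloc'ed buffer so the
 * indirect PBL path can DMA-map it with dma_map_sg().
 */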
1022 static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
1023 {
1024         struct scatterlist *sglist;
1025         struct page *pg;
1026         int i;
1027
1028         sglist = kcalloc(page_cnt, sizeof(*sglist), GFP_KERNEL);
1029         if (!sglist)
1030                 return NULL;
1031         sg_init_table(sglist, page_cnt);
1032         for (i = 0; i < page_cnt; i++) {
1033                 pg = vmalloc_to_page(buf);
1034                 if (!pg)
1035                         goto err;
1036                 sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
1037                 buf += PAGE_SIZE / sizeof(*buf);
1038         }
1039         return sglist;
1040
1041 err:
1042         kfree(sglist);
1043         return NULL;
1044 }
1045
1046 /*
1047  * create a chunk list of the DMA addresses of the physical pages
1048  * described by the supplied scatter-gather list
1049  */
1050 static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
1051 {
1052         struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1053         int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
1054         struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
1055         unsigned int chunk_list_size, chunk_idx, payload_idx;
1056         int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
1057         struct efa_com_ctrl_buff_info *ctrl_buf;
1058         u64 *cur_chunk_buf, *prev_chunk_buf;
1059         struct ib_block_iter biter;
1060         dma_addr_t dma_addr;
1061         int i;
1062
1063         /* allocate a chunk list that consists of 4KB chunks */
1064         chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
1065
1066         chunk_list->size = chunk_list_size;
1067         chunk_list->chunks = kcalloc(chunk_list_size,
1068                                      sizeof(*chunk_list->chunks),
1069                                      GFP_KERNEL);
1070         if (!chunk_list->chunks)
1071                 return -ENOMEM;
1072
1073         ibdev_dbg(&dev->ibdev,
1074                   "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
1075                   page_cnt);
1076
1077         /* allocate chunk buffers: */
1078         for (i = 0; i < chunk_list_size; i++) {
1079                 chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
1080                 if (!chunk_list->chunks[i].buf)
1081                         goto chunk_list_dealloc;
1082
1083                 chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
1084         }
1085         chunk_list->chunks[chunk_list_size - 1].length =
1086                 ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
1087                         EFA_CHUNK_PTR_SIZE;
1088
1089         /* fill the chunks with the DMA addresses of the sg list pages: */
1090         chunk_idx = 0;
1091         payload_idx = 0;
1092         cur_chunk_buf = chunk_list->chunks[0].buf;
1093         rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
1094                             EFA_CHUNK_PAYLOAD_SIZE) {
1095                 cur_chunk_buf[payload_idx++] =
1096                         rdma_block_iter_dma_address(&biter);
1097
1098                 if (payload_idx == EFA_PTRS_PER_CHUNK) {
1099                         chunk_idx++;
1100                         cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
1101                         payload_idx = 0;
1102                 }
1103         }
1104
1105         /* DMA-map the chunks and fill in each chunk's next-chunk pointer */
1106         for (i = chunk_list_size - 1; i >= 0; i--) {
1107                 dma_addr = dma_map_single(&dev->pdev->dev,
1108                                           chunk_list->chunks[i].buf,
1109                                           chunk_list->chunks[i].length,
1110                                           DMA_TO_DEVICE);
1111                 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1112                         ibdev_err(&dev->ibdev,
1113                                   "chunk[%u] dma_map_failed\n", i);
1114                         goto chunk_list_unmap;
1115                 }
1116
1117                 chunk_list->chunks[i].dma_addr = dma_addr;
1118                 ibdev_dbg(&dev->ibdev,
1119                           "chunk[%u] mapped at [%pad]\n", i, &dma_addr);
1120
1121                 if (!i)
1122                         break;
1123
1124                 prev_chunk_buf = chunk_list->chunks[i - 1].buf;
1125
1126                 ctrl_buf = (struct efa_com_ctrl_buff_info *)
1127                                 &prev_chunk_buf[EFA_PTRS_PER_CHUNK];
1128                 ctrl_buf->length = chunk_list->chunks[i].length;
1129
1130                 efa_com_set_dma_addr(dma_addr,
1131                                      &ctrl_buf->address.mem_addr_high,
1132                                      &ctrl_buf->address.mem_addr_low);
1133         }
1134
1135         return 0;
1136
1137 chunk_list_unmap:
1138         for (; i < chunk_list_size; i++) {
1139                 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1140                                  chunk_list->chunks[i].length, DMA_TO_DEVICE);
1141         }
1142 chunk_list_dealloc:
1143         for (i = 0; i < chunk_list_size; i++)
1144                 kfree(chunk_list->chunks[i].buf);
1145
1146         kfree(chunk_list->chunks);
1147         return -ENOMEM;
1148 }
1149
1150 static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1151 {
1152         struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1153         int i;
1154
1155         for (i = 0; i < chunk_list->size; i++) {
1156                 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1157                                  chunk_list->chunks[i].length, DMA_TO_DEVICE);
1158                 kfree(chunk_list->chunks[i].buf);
1159         }
1160
1161         kfree(chunk_list->chunks);
1162 }
1163
1164 /* initialize pbl continuous mode: map pbl buffer to a dma address. */
1165 static int pbl_continuous_initialize(struct efa_dev *dev,
1166                                      struct pbl_context *pbl)
1167 {
1168         dma_addr_t dma_addr;
1169
1170         dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
1171                                   pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1172         if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1173                 ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
1174                 return -ENOMEM;
1175         }
1176
1177         pbl->phys.continuous.dma_addr = dma_addr;
1178         ibdev_dbg(&dev->ibdev,
1179                   "pbl continuous - dma_addr = %pad, size[%u]\n",
1180                   &dma_addr, pbl->pbl_buf_size_in_bytes);
1181
1182         return 0;
1183 }
1184
1185 /*
1186  * initialize pbl indirect mode:
1187  * create a chunk list out of the DMA addresses of the physical pages of
1188  * the pbl buffer.
1189  */
1190 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
1191 {
1192         u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
1193         struct scatterlist *sgl;
1194         int sg_dma_cnt, err;
1195
1196         BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
1197         sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
1198         if (!sgl)
1199                 return -ENOMEM;
1200
1201         sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1202         if (!sg_dma_cnt) {
1203                 err = -EINVAL;
1204                 goto err_map;
1205         }
1206
1207         pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
1208         pbl->phys.indirect.sgl = sgl;
1209         pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
1210         err = pbl_chunk_list_create(dev, pbl);
1211         if (err) {
1212                 ibdev_dbg(&dev->ibdev,
1213                           "chunk_list creation failed[%d]\n", err);
1214                 goto err_chunk;
1215         }
1216
1217         ibdev_dbg(&dev->ibdev,
1218                   "pbl indirect - size[%u], chunks[%u]\n",
1219                   pbl->pbl_buf_size_in_bytes,
1220                   pbl->phys.indirect.chunk_list.size);
1221
1222         return 0;
1223
1224 err_chunk:
1225         dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1226 err_map:
1227         kfree(sgl);
1228         return err;
1229 }
1230
1231 static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
1232 {
1233         pbl_chunk_list_destroy(dev, pbl);
1234         dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
1235                      pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
1236         kfree(pbl->phys.indirect.sgl);
1237 }
1238
1239 /* create a page buffer list from a mapped user memory region */
1240 static int pbl_create(struct efa_dev *dev,
1241                       struct pbl_context *pbl,
1242                       struct ib_umem *umem,
1243                       int hp_cnt,
1244                       u8 hp_shift)
1245 {
1246         int err;
1247
1248         pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
1249         pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
1250         if (!pbl->pbl_buf)
1251                 return -ENOMEM;
1252
1253         if (is_vmalloc_addr(pbl->pbl_buf)) {
1254                 pbl->physically_continuous = 0;
1255                 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1256                                         hp_shift);
1257                 if (err)
1258                         goto err_free;
1259
1260                 err = pbl_indirect_initialize(dev, pbl);
1261                 if (err)
1262                         goto err_free;
1263         } else {
1264                 pbl->physically_continuous = 1;
1265                 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1266                                         hp_shift);
1267                 if (err)
1268                         goto err_free;
1269
1270                 err = pbl_continuous_initialize(dev, pbl);
1271                 if (err)
1272                         goto err_free;
1273         }
1274
1275         ibdev_dbg(&dev->ibdev,
1276                   "user_pbl_created: user_pages[%u], continuous[%u]\n",
1277                   hp_cnt, pbl->physically_continuous);
1278
1279         return 0;
1280
1281 err_free:
1282         kvfree(pbl->pbl_buf);
1283         return err;
1284 }
1285
1286 static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1287 {
1288         if (pbl->physically_continuous)
1289                 dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
1290                                  pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1291         else
1292                 pbl_indirect_terminate(dev, pbl);
1293
1294         kvfree(pbl->pbl_buf);
1295 }
1296
1297 static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
1298                                  struct efa_com_reg_mr_params *params)
1299 {
1300         int err;
1301
1302         params->inline_pbl = 1;
1303         err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
1304                                 params->page_num, params->page_shift);
1305         if (err)
1306                 return err;
1307
1308         ibdev_dbg(&dev->ibdev,
1309                   "inline_pbl_array - pages[%u]\n", params->page_num);
1310
1311         return 0;
1312 }
1313
1314 static int efa_create_pbl(struct efa_dev *dev,
1315                           struct pbl_context *pbl,
1316                           struct efa_mr *mr,
1317                           struct efa_com_reg_mr_params *params)
1318 {
1319         int err;
1320
1321         err = pbl_create(dev, pbl, mr->umem, params->page_num,
1322                          params->page_shift);
1323         if (err) {
1324                 ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
1325                 return err;
1326         }
1327
1328         params->inline_pbl = 0;
1329         params->indirect = !pbl->physically_continuous;
1330         if (pbl->physically_continuous) {
1331                 params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;
1332
1333                 efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
1334                                      &params->pbl.pbl.address.mem_addr_high,
1335                                      &params->pbl.pbl.address.mem_addr_low);
1336         } else {
1337                 params->pbl.pbl.length =
1338                         pbl->phys.indirect.chunk_list.chunks[0].length;
1339
1340                 efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
1341                                      &params->pbl.pbl.address.mem_addr_high,
1342                                      &params->pbl.pbl.address.mem_addr_low);
1343         }
1344
1345         return 0;
1346 }
1347
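/*
 * MR registration: pin the user memory, pick the best page size the device
 * supports for this range, then hand the page DMA addresses to the device
 * either inline in the admin command (small regions) or through a PBL,
 * which is itself either physically continuous or an indirect chunk list.
 */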
1348 struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
1349                          u64 virt_addr, int access_flags,
1350                          struct ib_udata *udata)
1351 {
1352         struct efa_dev *dev = to_edev(ibpd->device);
1353         struct efa_com_reg_mr_params params = {};
1354         struct efa_com_reg_mr_result result = {};
1355         struct pbl_context pbl;
1356         int supp_access_flags;
1357         unsigned int pg_sz;
1358         struct efa_mr *mr;
1359         int inline_size;
1360         int err;
1361
1362         if (udata && udata->inlen &&
1363             !ib_is_udata_cleared(udata, 0, udata->inlen)) {
1364                 ibdev_dbg(&dev->ibdev,
1365                           "Incompatible ABI params, udata not cleared\n");
1366                 err = -EINVAL;
1367                 goto err_out;
1368         }
1369
1370         supp_access_flags =
1371                 IB_ACCESS_LOCAL_WRITE |
1372                 (is_rdma_read_cap(dev) ? IB_ACCESS_REMOTE_READ : 0);
1373
1374         access_flags &= ~IB_ACCESS_OPTIONAL;
1375         if (access_flags & ~supp_access_flags) {
1376                 ibdev_dbg(&dev->ibdev,
1377                           "Unsupported access flags[%#x], supported[%#x]\n",
1378                           access_flags, supp_access_flags);
1379                 err = -EOPNOTSUPP;
1380                 goto err_out;
1381         }
1382
1383         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1384         if (!mr) {
1385                 err = -ENOMEM;
1386                 goto err_out;
1387         }
1388
1389         mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
1390         if (IS_ERR(mr->umem)) {
1391                 err = PTR_ERR(mr->umem);
1392                 ibdev_dbg(&dev->ibdev,
1393                           "Failed to pin and map user space memory[%d]\n", err);
1394                 goto err_free;
1395         }
1396
1397         params.pd = to_epd(ibpd)->pdn;
1398         params.iova = virt_addr;
1399         params.mr_length_in_bytes = length;
1400         params.permissions = access_flags;
1401
1402         pg_sz = ib_umem_find_best_pgsz(mr->umem,
1403                                        dev->dev_attr.page_size_cap,
1404                                        virt_addr);
1405         if (!pg_sz) {
1406                 err = -EOPNOTSUPP;
1407                 ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
1408                           dev->dev_attr.page_size_cap);
1409                 goto err_unmap;
1410         }
1411
1412         params.page_shift = __ffs(pg_sz);
1413         params.page_num = DIV_ROUND_UP(length + (start & (pg_sz - 1)),
1414                                        pg_sz);
1415
1416         ibdev_dbg(&dev->ibdev,
1417                   "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
1418                   start, length, params.page_shift, params.page_num);
1419
1420         inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
1421         if (params.page_num <= inline_size) {
1422                 err = efa_create_inline_pbl(dev, mr, &params);
1423                 if (err)
1424                         goto err_unmap;
1425
1426                 err = efa_com_register_mr(&dev->edev, &params, &result);
1427                 if (err)
1428                         goto err_unmap;
1429         } else {
1430                 err = efa_create_pbl(dev, &pbl, mr, &params);
1431                 if (err)
1432                         goto err_unmap;
1433
1434                 err = efa_com_register_mr(&dev->edev, &params, &result);
1435                 pbl_destroy(dev, &pbl);
1436
1437                 if (err)
1438                         goto err_unmap;
1439         }
1440
1441         mr->ibmr.lkey = result.l_key;
1442         mr->ibmr.rkey = result.r_key;
1443         mr->ibmr.length = length;
1444         ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
1445
1446         return &mr->ibmr;
1447
1448 err_unmap:
1449         ib_umem_release(mr->umem);
1450 err_free:
1451         kfree(mr);
1452 err_out:
1453         atomic64_inc(&dev->stats.sw_stats.reg_mr_err);
1454         return ERR_PTR(err);
1455 }
1456
1457 int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1458 {
1459         struct efa_dev *dev = to_edev(ibmr->device);
1460         struct efa_com_dereg_mr_params params;
1461         struct efa_mr *mr = to_emr(ibmr);
1462         int err;
1463
1464         ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
1465
1466         params.l_key = mr->ibmr.lkey;
1467         err = efa_com_dereg_mr(&dev->edev, &params);
1468         if (err)
1469                 return err;
1470
1471         ib_umem_release(mr->umem);
1472         kfree(mr);
1473
1474         return 0;
1475 }
1476
1477 int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
1478                            struct ib_port_immutable *immutable)
1479 {
1480         struct ib_port_attr attr;
1481         int err;
1482
1483         err = ib_query_port(ibdev, port_num, &attr);
1484         if (err) {
1485                 ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
1486                 return err;
1487         }
1488
1489         immutable->pkey_tbl_len = attr.pkey_tbl_len;
1490         immutable->gid_tbl_len = attr.gid_tbl_len;
1491
1492         return 0;
1493 }
1494
1495 static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
1496 {
1497         struct efa_com_dealloc_uar_params params = {
1498                 .uarn = uarn,
1499         };
1500
1501         return efa_com_dealloc_uar(&dev->edev, &params);
1502 }
1503
1504 int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
1505 {
1506         struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1507         struct efa_dev *dev = to_edev(ibucontext->device);
1508         struct efa_ibv_alloc_ucontext_resp resp = {};
1509         struct efa_com_alloc_uar_result result;
1510         int err;
1511
1512         /*
1513          * It's fine if the driver does not know all request fields;
1514          * we will ack input fields in our response.
1515          */
1516
1517         err = efa_com_alloc_uar(&dev->edev, &result);
1518         if (err)
1519                 goto err_out;
1520
1521         ucontext->uarn = result.uarn;
1522
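             /* Report supported udata commands and device limits to userspace */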
1523         resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
1524         resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
1525         resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
1526         resp.inline_buf_size = dev->dev_attr.inline_buf_size;
1527         resp.max_llq_size = dev->dev_attr.max_llq_size;
1528
1529         if (udata && udata->outlen) {
1530                 err = ib_copy_to_udata(udata, &resp,
1531                                        min(sizeof(resp), udata->outlen));
1532                 if (err)
1533                         goto err_dealloc_uar;
1534         }
1535
1536         return 0;
1537
1538 err_dealloc_uar:
1539         efa_dealloc_uar(dev, result.uarn);
1540 err_out:
1541         atomic64_inc(&dev->stats.sw_stats.alloc_ucontext_err);
1542         return err;
1543 }
1544
1545 void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
1546 {
1547         struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1548         struct efa_dev *dev = to_edev(ibucontext->device);
1549
1550         efa_dealloc_uar(dev, ucontext->uarn);
1551 }
1552
1553 void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
1554 {
1555         struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
1556
1557         kfree(entry);
1558 }
1559
1560 static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
1561                       struct vm_area_struct *vma)
1562 {
1563         struct rdma_user_mmap_entry *rdma_entry;
1564         struct efa_user_mmap_entry *entry;
1565         unsigned long va;
1566         int err = 0;
1567         u64 pfn;
1568
1569         rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
1570         if (!rdma_entry) {
1571                 ibdev_dbg(&dev->ibdev,
1572                           "pgoff[%#lx] does not have a valid entry\n",
1573                           vma->vm_pgoff);
1574                 return -EINVAL;
1575         }
1576         entry = to_emmap(rdma_entry);
1577
1578         ibdev_dbg(&dev->ibdev,
1579                   "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
1580                   entry->address, rdma_entry->npages * PAGE_SIZE,
1581                   entry->mmap_flag);
1582
1583         pfn = entry->address >> PAGE_SHIFT;
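             /*
              * IO regions are mapped with rdma_user_mmap_io() using the
              * requested caching attribute; DMA pages are regular kernel
              * pages and are inserted into the VMA one page at a time.
              */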
1584         switch (entry->mmap_flag) {
1585         case EFA_MMAP_IO_NC:
1586                 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1587                                         entry->rdma_entry.npages * PAGE_SIZE,
1588                                         pgprot_noncached(vma->vm_page_prot),
1589                                         rdma_entry);
1590                 break;
1591         case EFA_MMAP_IO_WC:
1592                 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1593                                         entry->rdma_entry.npages * PAGE_SIZE,
1594                                         pgprot_writecombine(vma->vm_page_prot),
1595                                         rdma_entry);
1596                 break;
1597         case EFA_MMAP_DMA_PAGE:
1598                 for (va = vma->vm_start; va < vma->vm_end;
1599                      va += PAGE_SIZE, pfn++) {
1600                         err = vm_insert_page(vma, va, pfn_to_page(pfn));
1601                         if (err)
1602                                 break;
1603                 }
1604                 break;
1605         default:
1606                 err = -EINVAL;
1607         }
1608
1609         if (err)
1610                 ibdev_dbg(
1611                         &dev->ibdev,
1612                         "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
1613                         entry->address, rdma_entry->npages * PAGE_SIZE,
1614                         entry->mmap_flag, err);
1615
1616         rdma_user_mmap_entry_put(rdma_entry);
1617         return err;
1618 }
1619
1620 int efa_mmap(struct ib_ucontext *ibucontext,
1621              struct vm_area_struct *vma)
1622 {
1623         struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1624         struct efa_dev *dev = to_edev(ibucontext->device);
1625         size_t length = vma->vm_end - vma->vm_start;
1626
1627         ibdev_dbg(&dev->ibdev,
1628                   "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
1629                   vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
1630
1631         return __efa_mmap(dev, ucontext, vma);
1632 }
1633
1634 static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
1635 {
1636         struct efa_com_destroy_ah_params params = {
1637                 .ah = ah->ah,
1638                 .pdn = to_epd(ah->ibah.pd)->pdn,
1639         };
1640
1641         return efa_com_destroy_ah(&dev->edev, &params);
1642 }
1643
1644 int efa_create_ah(struct ib_ah *ibah,
1645                   struct rdma_ah_attr *ah_attr,
1646                   u32 flags,
1647                   struct ib_udata *udata)
1648 {
1649         struct efa_dev *dev = to_edev(ibah->device);
1650         struct efa_com_create_ah_params params = {};
1651         struct efa_ibv_create_ah_resp resp = {};
1652         struct efa_com_create_ah_result result;
1653         struct efa_ah *ah = to_eah(ibah);
1654         int err;
1655
1656         if (!(flags & RDMA_CREATE_AH_SLEEPABLE)) {
1657                 ibdev_dbg(&dev->ibdev,
1658                           "Creating an address handle is not supported in atomic context\n");
1659                 err = -EOPNOTSUPP;
1660                 goto err_out;
1661         }
1662
1663         if (udata->inlen &&
1664             !ib_is_udata_cleared(udata, 0, udata->inlen)) {
1665                 ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
1666                 err = -EINVAL;
1667                 goto err_out;
1668         }
1669
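             /* The destination GID is used as the EFA destination address */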
1670         memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
1671                sizeof(params.dest_addr));
1672         params.pdn = to_epd(ibah->pd)->pdn;
1673         err = efa_com_create_ah(&dev->edev, &params, &result);
1674         if (err)
1675                 goto err_out;
1676
1677         memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
1678         ah->ah = result.ah;
1679
1680         resp.efa_address_handle = result.ah;
1681
1682         if (udata->outlen) {
1683                 err = ib_copy_to_udata(udata, &resp,
1684                                        min(sizeof(resp), udata->outlen));
1685                 if (err) {
1686                         ibdev_dbg(&dev->ibdev,
1687                                   "Failed to copy udata for create_ah response\n");
1688                         goto err_destroy_ah;
1689                 }
1690         }
1691         ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);
1692
1693         return 0;
1694
1695 err_destroy_ah:
1696         efa_ah_destroy(dev, ah);
1697 err_out:
1698         atomic64_inc(&dev->stats.sw_stats.create_ah_err);
1699         return err;
1700 }
1701
1702 void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
1703 {
1704         struct efa_dev *dev = to_edev(ibah->pd->device);
1705         struct efa_ah *ah = to_eah(ibah);
1706
1707         ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);
1708
1709         if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
1710                 ibdev_dbg(&dev->ibdev,
1711                           "Destroying an address handle is not supported in atomic context\n");
1712                 return;
1713         }
1714
1715         efa_ah_destroy(dev, ah);
1716 }
1717
1718 struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
1719 {
1720         return rdma_alloc_hw_stats_struct(efa_stats_names,
1721                                           ARRAY_SIZE(efa_stats_names),
1722                                           RDMA_HW_STATS_DEFAULT_LIFESPAN);
1723 }
1724
1725 int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1726                      u8 port_num, int index)
1727 {
1728         struct efa_com_get_stats_params params = {};
1729         union efa_com_get_stats_result result;
1730         struct efa_dev *dev = to_edev(ibdev);
1731         struct efa_com_basic_stats *bs;
1732         struct efa_com_stats_admin *as;
1733         struct efa_stats *s;
1734         int err;
1735
1736         params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
1737         params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
1738
1739         err = efa_com_get_stats(&dev->edev, &params, &result);
1740         if (err)
1741                 return err;
1742
1743         bs = &result.basic_stats;
1744         stats->value[EFA_TX_BYTES] = bs->tx_bytes;
1745         stats->value[EFA_TX_PKTS] = bs->tx_pkts;
1746         stats->value[EFA_RX_BYTES] = bs->rx_bytes;
1747         stats->value[EFA_RX_PKTS] = bs->rx_pkts;
1748         stats->value[EFA_RX_DROPS] = bs->rx_drops;
1749
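             /* Admin command queue counters */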
1750         as = &dev->edev.aq.stats;
1751         stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
1752         stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
1753         stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
1754
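             /* Software counters: keep-alives received and verbs error counts */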
1755         s = &dev->stats;
1756         stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
1757         stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
1758         stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
1759         stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
1760         stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
1761         stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);
1762
1763         return ARRAY_SIZE(efa_stats_names);
1764 }
1765
1766 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
1767                                          u8 port_num)
1768 {
1769         return IB_LINK_LAYER_UNSPECIFIED;
1770 }
1771