RDMA/hns: Fix wrong parameters when initializing the mtt of srq->idx_que
[linux-2.6-microblaze.git] drivers/infiniband/hw/hns/hns_roce_srq.c
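
The subject line refers to the call that initializes the MTT (memory
translation table) for the SRQ's index queue. A minimal sketch of the
presumed change, assuming the old code passed the WQE buffer's page
parameters where the index-queue buffer's own values belong (the
corrected call now sits in create_kernel_srq() below):

        /* before (presumed): idx_que MTT sized from the WQE buffer */
        ret = hns_roce_mtt_init(hr_dev, srq->buf.npages,
                                srq->buf.page_shift,
                                &srq->idx_que.mtt);

        /* after: idx_que MTT sized from the idx_que buffer itself */
        ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
                                srq->idx_que.idx_buf.page_shift,
                                &srq->idx_que.mtt);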
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <rdma/ib_umem.h>
#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

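/*
 * Dispatch an async event to its SRQ. The xarray lookup and the
 * refcount bump happen under xa_lock, so the SRQ cannot be freed while
 * the handler runs; the last reference holder completes srq->free,
 * which hns_roce_srq_free() waits on.
 */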
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
        struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
        struct hns_roce_srq *srq;

        xa_lock(&srq_table->xa);
        srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
        if (srq)
                atomic_inc(&srq->refcount);
        xa_unlock(&srq_table->xa);

        if (!srq) {
                dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
                return;
        }

        srq->event(srq, event_type);

        if (atomic_dec_and_test(&srq->refcount))
                complete(&srq->free);
}

static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
                                  enum hns_roce_event event_type)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
        struct ib_srq *ibsrq = &srq->ibsrq;
        struct ib_event event;

        if (ibsrq->event_handler) {
                event.device      = ibsrq->device;
                event.element.srq = ibsrq;
                switch (event_type) {
                case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
                        event.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
                        event.event = IB_EVENT_SRQ_ERR;
                        break;
                default:
                        dev_err(hr_dev->dev,
                           "hns_roce: Unexpected event type 0x%x on SRQ %06lx\n",
                           event_type, srq->srqn);
                        return;
                }

                ibsrq->event_handler(&event, ibsrq->srq_context);
        }
}

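/*
 * SW2HW/HW2SW are mailbox commands that hand the SRQ context (SRQC) to
 * the hardware and reclaim it. For HW2SW, a NULL mailbox (op_modifier
 * set to 1) appears to ask the hardware to drop the context without
 * copying it back.
 */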
static int hns_roce_sw2hw_srq(struct hns_roce_dev *dev,
                              struct hns_roce_cmd_mailbox *mailbox,
                              unsigned long srq_num)
{
        return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
                                 HNS_ROCE_CMD_SW2HW_SRQ,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev,
                              struct hns_roce_cmd_mailbox *mailbox,
                              unsigned long srq_num)
{
        return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
                                 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_SRQ,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

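/*
 * Hardware-side allocation: look up the MTT pages backing the WQE
 * buffer and the index queue, reserve an SRQ number from the bitmap,
 * install the SRQC table entry, publish the SRQ in the xarray, and
 * finally write the context to hardware via SW2HW_SRQ.
 */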
static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
                              u16 xrcd, struct hns_roce_mtt *hr_mtt,
                              u64 db_rec_addr, struct hns_roce_srq *srq)
{
        struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
        struct hns_roce_cmd_mailbox *mailbox;
        dma_addr_t dma_handle_wqe;
        dma_addr_t dma_handle_idx;
        u64 *mtts_wqe;
        u64 *mtts_idx;
        int ret;

        /* Get the physical address of srq buf */
        mtts_wqe = hns_roce_table_find(hr_dev,
                                       &hr_dev->mr_table.mtt_srqwqe_table,
                                       srq->mtt.first_seg,
                                       &dma_handle_wqe);
        if (!mtts_wqe) {
                dev_err(hr_dev->dev,
                        "SRQ alloc: failed to find srq buf addr.\n");
                return -EINVAL;
        }

        /* Get physical address of idx que buf */
        mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table,
                                       srq->idx_que.mtt.first_seg,
                                       &dma_handle_idx);
        if (!mtts_idx) {
                dev_err(hr_dev->dev,
                        "SRQ alloc: failed to find idx que buf addr.\n");
                return -EINVAL;
        }

        ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
        if (ret == -1) {
                dev_err(hr_dev->dev, "SRQ alloc: failed to alloc index.\n");
                return -ENOMEM;
        }

        ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
        if (ret)
                goto err_out;

        ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
        if (ret)
                goto err_put;

        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox)) {
                ret = PTR_ERR(mailbox);
                goto err_xa;
        }

        hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
                               mtts_wqe, mtts_idx, dma_handle_wqe,
                               dma_handle_idx);

        ret = hns_roce_sw2hw_srq(hr_dev, mailbox, srq->srqn);
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
        if (ret)
                goto err_xa;

        atomic_set(&srq->refcount, 1);
        init_completion(&srq->free);
        return ret;

err_xa:
        xa_erase(&srq_table->xa, srq->srqn);

err_put:
        hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

err_out:
        hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
        return ret;
}

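/*
 * Undo hns_roce_srq_alloc(): tell the hardware to release the context,
 * unpublish the SRQ, then wait for all event references to drop before
 * returning the SRQC entry and the SRQ number.
 */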
static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
                              struct hns_roce_srq *srq)
{
        struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
        int ret;

        ret = hns_roce_hw2sw_srq(hr_dev, NULL, srq->srqn);
        if (ret)
                dev_err(hr_dev->dev, "HW2SW_SRQ failed (%d) for SRQN %06lx\n",
                        ret, srq->srqn);

        xa_erase(&srq_table->xa, srq->srqn);

        if (atomic_dec_and_test(&srq->refcount))
                complete(&srq->free);
        wait_for_completion(&srq->free);

        hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
        hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}

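/*
 * Userspace path: the WQE buffer and the index queue are allocated by
 * the user and pinned here with ib_umem_get(); their page lists are
 * then written into the two MTTs that hns_roce_srq_alloc() consumes.
 */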
static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
                           int srq_buf_size)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
        struct hns_roce_ib_create_srq ucmd;
        struct hns_roce_buf *buf;
        int ret;

        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
                return -EFAULT;

        srq->umem = ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
        if (IS_ERR(srq->umem))
                return PTR_ERR(srq->umem);

        buf = &srq->buf;
        buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->umem),
                                   1 << hr_dev->caps.srqwqe_buf_pg_sz);
        buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
        ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
                                &srq->mtt);
        if (ret)
                goto err_user_buf;

        ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
        if (ret)
                goto err_user_srq_mtt;

        /* config index queue BA */
        srq->idx_que.umem = ib_umem_get(udata, ucmd.que_addr,
                                        srq->idx_que.buf_size, 0, 0);
        if (IS_ERR(srq->idx_que.umem)) {
                dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
                ret = PTR_ERR(srq->idx_que.umem);
                goto err_user_srq_mtt;
        }

        buf = &srq->idx_que.idx_buf;
        buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem),
                                   1 << hr_dev->caps.idx_buf_pg_sz);
        buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
        ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
                                &srq->idx_que.mtt);
        if (ret) {
                dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
                goto err_user_idx_mtt;
        }

        ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
                                         srq->idx_que.umem);
        if (ret) {
                dev_err(hr_dev->dev,
                        "hns_roce_ib_umem_write_mtt error for idx que\n");
                goto err_user_idx_buf;
        }

        return 0;

err_user_idx_buf:
        hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_user_idx_mtt:
        ib_umem_release(srq->idx_que.umem);

err_user_srq_mtt:
        hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_user_buf:
        ib_umem_release(srq->umem);

        return ret;
}

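/*
 * Kernel-side index queue: a bitmap tracks which indices are in use,
 * and a driver-allocated buffer holds the queue itself.
 */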
static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
                                   u32 page_shift)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct hns_roce_idx_que *idx_que = &srq->idx_que;

        idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
        if (!idx_que->bitmap)
                return -ENOMEM;

        idx_que->buf_size = srq->idx_que.buf_size;

        if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
                               &idx_que->idx_buf, page_shift)) {
                bitmap_free(idx_que->bitmap);
                return -ENOMEM;
        }

        return 0;
}

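/*
 * Kernel path: both the WQE buffer and the index queue are allocated
 * by the driver, and each gets its own MTT describing its pages.
 */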
static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
        u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
        int ret;

        if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
                               &srq->buf, page_shift))
                return -ENOMEM;

        srq->head = 0;
        srq->tail = srq->max - 1;

        ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
                                &srq->mtt);
        if (ret)
                goto err_kernel_buf;

        ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
        if (ret)
                goto err_kernel_srq_mtt;

        page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
        ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift);
        if (ret) {
                dev_err(hr_dev->dev, "Failed to create idx queue (%d)!\n", ret);
                goto err_kernel_srq_mtt;
        }

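        /*
         * Size the idx_que MTT from the idx_que buffer itself, not from
         * srq->buf; passing the WQE buffer's parameters here is
         * presumably the wrong-parameter bug the subject line names.
         */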
        /* Init mtt table for idx_que */
        ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
                                srq->idx_que.idx_buf.page_shift,
                                &srq->idx_que.mtt);
        if (ret)
                goto err_kernel_create_idx;

        /* Write buffer address into the mtt table */
        ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
                                     &srq->idx_que.idx_buf);
        if (ret)
                goto err_kernel_idx_buf;

        srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
        if (!srq->wrid) {
                ret = -ENOMEM;
                goto err_kernel_idx_buf;
        }

        return 0;

err_kernel_idx_buf:
        hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_kernel_create_idx:
        hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
                          &srq->idx_que.idx_buf);
        bitmap_free(srq->idx_que.bitmap);

err_kernel_srq_mtt:
        hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_kernel_buf:
        hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);

        return ret;
}

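/* Tear down everything create_user_srq() set up. */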
static void destroy_user_srq(struct hns_roce_dev *hr_dev,
                             struct hns_roce_srq *srq)
{
        hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
        ib_umem_release(srq->idx_que.umem);
        hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
        ib_umem_release(srq->umem);
}

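/* Tear down everything create_kernel_srq() set up. */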
static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
                               struct hns_roce_srq *srq, int srq_buf_size)
{
        kvfree(srq->wrid);
        hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
        hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
        bitmap_free(srq->idx_que.bitmap);
        hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
        hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
}

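/*
 * Verb entry point. Sizing example: with max_wr = 100 and max_sge = 2,
 * srq->max = roundup_pow_of_two(101) = 128 entries, srq_desc_size =
 * max(16, 16 * 2) = 32 bytes, wqe_shift = 5, so the WQE buffer is
 * 128 * 32 = 4096 bytes; the index queue holds one entry of
 * HNS_ROCE_IDX_QUE_ENTRY_SZ bytes per WQE.
 */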
int hns_roce_create_srq(struct ib_srq *ib_srq,
                        struct ib_srq_init_attr *srq_init_attr,
                        struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
        struct hns_roce_ib_create_srq_resp resp = {};
        struct hns_roce_srq *srq = to_hr_srq(ib_srq);
        int srq_desc_size;
        int srq_buf_size;
        int ret = 0;
        u32 cqn;

        /* Check the actual SRQ wqe and SRQ sge num */
        if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
            srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
                return -EINVAL;

        mutex_init(&srq->mutex);
        spin_lock_init(&srq->lock);

        srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
        srq->max_gs = srq_init_attr->attr.max_sge;

        srq_desc_size = max(16, 16 * srq->max_gs);

        srq->wqe_shift = ilog2(srq_desc_size);

        srq_buf_size = srq->max * srq_desc_size;

        srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
        srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz;
        srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
        srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;

        if (udata) {
                ret = create_user_srq(srq, udata, srq_buf_size);
                if (ret) {
                        dev_err(hr_dev->dev, "Create user srq failed\n");
                        goto err_srq;
                }
        } else {
                ret = create_kernel_srq(srq, srq_buf_size);
                if (ret) {
                        dev_err(hr_dev->dev, "Create kernel srq failed\n");
                        goto err_srq;
                }
        }

        cqn = ib_srq_has_cq(srq_init_attr->srq_type) ?
              to_hr_cq(srq_init_attr->ext.cq)->cqn : 0;

        srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

        ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(ib_srq->pd)->pdn, cqn, 0,
                                 &srq->mtt, 0, srq);
        if (ret)
                goto err_wrid;

        srq->event = hns_roce_ib_srq_event;
        resp.srqn = srq->srqn;

        if (udata) {
                if (ib_copy_to_udata(udata, &resp,
                                     min(udata->outlen, sizeof(resp)))) {
                        ret = -EFAULT;
                        goto err_srqc_alloc;
                }
        }

        return 0;

err_srqc_alloc:
        hns_roce_srq_free(hr_dev, srq);

err_wrid:
        if (udata)
                destroy_user_srq(hr_dev, srq);
        else
                destroy_kernel_srq(hr_dev, srq, srq_buf_size);

err_srq:
        return ret;
}

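/*
 * Destroy verb: release the hardware context first, then the MTTs and
 * buffers. ib_umem_release() accepts a NULL umem, so the kernel-mode
 * path falls through those calls harmlessly.
 */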
void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);

        hns_roce_srq_free(hr_dev, srq);
        hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
        hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

        if (!udata) {
                kvfree(srq->wrid);
                hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
                                  &srq->idx_que.idx_buf);
                bitmap_free(srq->idx_que.bitmap);
                hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
                                  &srq->buf);
        }
        ib_umem_release(srq->idx_que.umem);
        ib_umem_release(srq->umem);
}

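/*
 * The xarray maps SRQ numbers to SRQ structures for event dispatch;
 * the bitmap hands out SRQ numbers below num_srqs, skipping the
 * reserved range.
 */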
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;

        xa_init(&srq_table->xa);

        return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
                                    hr_dev->caps.num_srqs - 1,
                                    hr_dev->caps.reserved_srqs, 0);
}

void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}