drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
                           struct pvrdma_cq **recv_cq)
{
        *send_cq = to_vcq(qp->ibqp.send_cq);
        *recv_cq = to_vcq(qp->ibqp.recv_cq);
}

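/*
 * Acquire both CQ locks for a QP. When the send and receive CQ are the
 * same object only one lock is taken; otherwise the locks are always
 * taken in ascending cq_handle order so that concurrent callers cannot
 * deadlock against each other. pvrdma_unlock_cqs() is the matching
 * release.
 */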
static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
                            unsigned long *scq_flags,
                            unsigned long *rcq_flags)
        __acquires(scq->cq_lock) __acquires(rcq->cq_lock)
{
        if (scq == rcq) {
                spin_lock_irqsave(&scq->cq_lock, *scq_flags);
                __acquire(rcq->cq_lock);
        } else if (scq->cq_handle < rcq->cq_handle) {
                spin_lock_irqsave(&scq->cq_lock, *scq_flags);
                spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
                                         SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
                spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
                                         SINGLE_DEPTH_NESTING);
        }
}

static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
                              unsigned long *scq_flags,
                              unsigned long *rcq_flags)
        __releases(scq->cq_lock) __releases(rcq->cq_lock)
{
        if (scq == rcq) {
                __release(rcq->cq_lock);
                spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
        } else if (scq->cq_handle < rcq->cq_handle) {
                spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
                spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
        } else {
                spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
                spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
        }
}

static void pvrdma_reset_qp(struct pvrdma_qp *qp)
{
        struct pvrdma_cq *scq, *rcq;
        unsigned long scq_flags, rcq_flags;

        /* Clean up cqes */
        get_cqs(qp, &scq, &rcq);
        pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

        _pvrdma_flush_cqe(qp, scq);
        if (scq != rcq)
                _pvrdma_flush_cqe(qp, rcq);

        pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

        /*
         * Reset queuepair. The checks are because usermode queuepairs won't
         * have kernel ringstates.
         */
        if (qp->rq.ring) {
                atomic_set(&qp->rq.ring->cons_head, 0);
                atomic_set(&qp->rq.ring->prod_tail, 0);
        }
        if (qp->sq.ring) {
                atomic_set(&qp->sq.ring->cons_head, 0);
                atomic_set(&qp->sq.ring->prod_tail, 0);
        }
}

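/*
 * Size the receive queue from the requested capabilities: reject values
 * beyond the device limits, round the WR and SGE counts up to powers of
 * two, write the actual values back into @req_cap, and compute the WQE
 * size and the number of pages needed to back the ring.
 */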
static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
                              struct ib_qp_cap *req_cap,
                              struct pvrdma_qp *qp)
{
        if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr ||
            req_cap->max_recv_sge > dev->dsr->caps.max_sge) {
                dev_warn(&dev->pdev->dev, "recv queue size invalid\n");
                return -EINVAL;
        }

        qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
        qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));

        /* Write back */
        req_cap->max_recv_wr = qp->rq.wqe_cnt;
        req_cap->max_recv_sge = qp->rq.max_sg;

        qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
                                             sizeof(struct pvrdma_sge) *
                                             qp->rq.max_sg);
        qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
                          PAGE_SIZE;

        return 0;
}

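/*
 * Size the send queue the same way as the receive queue above, except
 * that PVRDMA_QP_NUM_HEADER_PAGES extra page(s) are reserved at the
 * start of the send buffer for the header.
 */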
static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
                              struct pvrdma_qp *qp)
{
        if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
            req_cap->max_send_sge > dev->dsr->caps.max_sge) {
                dev_warn(&dev->pdev->dev, "send queue size invalid\n");
                return -EINVAL;
        }

        qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
        qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));

        /* Write back */
        req_cap->max_send_wr = qp->sq.wqe_cnt;
        req_cap->max_send_sge = qp->sq.max_sg;

        qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
                                             sizeof(struct pvrdma_sge) *
                                             qp->sq.max_sg);
        /* Note: one extra page for the header. */
        qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES +
                          (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /
                                                                PAGE_SIZE;

        return 0;
}

/**
 * pvrdma_create_qp - create queue pair
 * @pd: protection domain
 * @init_attr: queue pair attributes
 * @udata: user data
 *
 * @return: the ib_qp pointer on success, otherwise returns an errno.
 */
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
                               struct ib_qp_init_attr *init_attr,
                               struct ib_udata *udata)
{
        struct pvrdma_qp *qp = NULL;
        struct pvrdma_dev *dev = to_vdev(pd->device);
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
        struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
        struct pvrdma_create_qp ucmd;
        unsigned long flags;
        int ret;
        bool is_srq = !!init_attr->srq;

        if (init_attr->create_flags) {
                dev_warn(&dev->pdev->dev,
                         "invalid create queuepair flags %#x\n",
                         init_attr->create_flags);
                return ERR_PTR(-EINVAL);
        }

        if (init_attr->qp_type != IB_QPT_RC &&
            init_attr->qp_type != IB_QPT_UD &&
            init_attr->qp_type != IB_QPT_GSI) {
                dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
                         init_attr->qp_type);
                return ERR_PTR(-EINVAL);
        }

        if (is_srq && !dev->dsr->caps.max_srq) {
                dev_warn(&dev->pdev->dev,
                         "SRQs not supported by device\n");
                return ERR_PTR(-EINVAL);
        }

        if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
                return ERR_PTR(-ENOMEM);

        switch (init_attr->qp_type) {
        case IB_QPT_GSI:
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > pd->device->phys_port_cnt ||
                    udata) {
                        dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
                        ret = -EINVAL;
                        goto err_qp;
                }
                /* fall through */
        case IB_QPT_RC:
        case IB_QPT_UD:
                qp = kzalloc(sizeof(*qp), GFP_KERNEL);
                if (!qp) {
                        ret = -ENOMEM;
                        goto err_qp;
                }

                spin_lock_init(&qp->sq.lock);
                spin_lock_init(&qp->rq.lock);
                mutex_init(&qp->mutex);
                refcount_set(&qp->refcnt, 1);
                init_completion(&qp->free);

                qp->state = IB_QPS_RESET;
                qp->is_kernel = !udata;

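                /*
                 * User-space QPs are backed by buffers the caller mapped and
                 * described in udata; kernel QPs size and lay out their own
                 * send/receive rings below.
                 */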
                if (!qp->is_kernel) {
                        dev_dbg(&dev->pdev->dev,
                                "create queuepair from user space\n");

                        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                                ret = -EFAULT;
                                goto err_qp;
                        }

                        if (!is_srq) {
                                /* set qp->sq.wqe_cnt, shift, buf_size.. */
                                qp->rumem = ib_umem_get(udata, ucmd.rbuf_addr,
                                                        ucmd.rbuf_size, 0, 0);
                                if (IS_ERR(qp->rumem)) {
                                        ret = PTR_ERR(qp->rumem);
                                        goto err_qp;
                                }
                                qp->srq = NULL;
                        } else {
                                qp->rumem = NULL;
                                qp->srq = to_vsrq(init_attr->srq);
                        }

                        qp->sumem = ib_umem_get(udata, ucmd.sbuf_addr,
                                                ucmd.sbuf_size, 0, 0);
                        if (IS_ERR(qp->sumem)) {
                                if (!is_srq)
                                        ib_umem_release(qp->rumem);
                                ret = PTR_ERR(qp->sumem);
                                goto err_qp;
                        }

                        qp->npages_send = ib_umem_page_count(qp->sumem);
                        if (!is_srq)
                                qp->npages_recv = ib_umem_page_count(qp->rumem);
                        else
                                qp->npages_recv = 0;
                        qp->npages = qp->npages_send + qp->npages_recv;
                } else {
                        ret = pvrdma_set_sq_size(to_vdev(pd->device),
                                                 &init_attr->cap, qp);
                        if (ret)
                                goto err_qp;

                        ret = pvrdma_set_rq_size(to_vdev(pd->device),
                                                 &init_attr->cap, qp);
                        if (ret)
                                goto err_qp;

                        qp->npages = qp->npages_send + qp->npages_recv;

                        /* Skip header page. */
                        qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE;

                        /* Recv queue pages are after send pages. */
                        qp->rq.offset = qp->npages_send * PAGE_SIZE;
                }

                if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
                        dev_warn(&dev->pdev->dev,
                                 "overflow pages in queuepair\n");
                        ret = -EINVAL;
                        goto err_umem;
                }

                ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
                                           qp->is_kernel);
                if (ret) {
                        dev_warn(&dev->pdev->dev,
                                 "could not allocate page directory\n");
                        goto err_umem;
                }

                if (!qp->is_kernel) {
                        pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
                        if (!is_srq)
                                pvrdma_page_dir_insert_umem(&qp->pdir,
                                                            qp->rumem,
                                                            qp->npages_send);
                } else {
                        /* Ring state is always the first page. */
                        qp->sq.ring = qp->pdir.pages[0];
                        qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
                }
                break;
        default:
                ret = -EINVAL;
                goto err_qp;
        }

        /* Not supported */
        init_attr->cap.max_inline_data = 0;

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
        cmd->pd_handle = to_vpd(pd)->pd_handle;
        cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
        cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
        if (is_srq)
                cmd->srq_handle = to_vsrq(init_attr->srq)->srq_handle;
        else
                cmd->srq_handle = 0;
        cmd->max_send_wr = init_attr->cap.max_send_wr;
        cmd->max_recv_wr = init_attr->cap.max_recv_wr;
        cmd->max_send_sge = init_attr->cap.max_send_sge;
        cmd->max_recv_sge = init_attr->cap.max_recv_sge;
        cmd->max_inline_data = init_attr->cap.max_inline_data;
        cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
        cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
        cmd->is_srq = is_srq;
        cmd->lkey = 0;
        cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
        cmd->total_chunks = qp->npages;
        cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
        cmd->pdir_dma = qp->pdir.dir_dma;

        dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
                cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge,
                cmd->max_recv_sge);

        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not create queuepair, error: %d\n", ret);
                goto err_pdir;
        }

        /* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
        qp->qp_handle = resp->qpn;
        qp->port = init_attr->port_num;
        qp->ibqp.qp_num = resp->qpn;
        spin_lock_irqsave(&dev->qp_tbl_lock, flags);
        dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
        spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

        return &qp->ibqp;

err_pdir:
        pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
        if (!qp->is_kernel) {
                if (qp->rumem)
                        ib_umem_release(qp->rumem);
                if (qp->sumem)
                        ib_umem_release(qp->sumem);
        }
err_qp:
        kfree(qp);
        atomic_dec(&dev->num_qps);

        return ERR_PTR(ret);
}

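/*
 * Tear down a QP: flush its CQEs under both CQ locks, drop it from the
 * QP table so no new references can be taken, then wait on the
 * completion until the last reference is gone before releasing the
 * umem buffers, the page directory and the QP itself.
 */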
static void pvrdma_free_qp(struct pvrdma_qp *qp)
{
        struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
        struct pvrdma_cq *scq;
        struct pvrdma_cq *rcq;
        unsigned long flags, scq_flags, rcq_flags;

        /* In case cq is polling */
        get_cqs(qp, &scq, &rcq);
        pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);

        _pvrdma_flush_cqe(qp, scq);
        if (scq != rcq)
                _pvrdma_flush_cqe(qp, rcq);

        spin_lock_irqsave(&dev->qp_tbl_lock, flags);
        dev->qp_tbl[qp->qp_handle] = NULL;
        spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

        pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);

        if (refcount_dec_and_test(&qp->refcnt))
                complete(&qp->free);
        wait_for_completion(&qp->free);

        if (!qp->is_kernel) {
                if (qp->rumem)
                        ib_umem_release(qp->rumem);
                if (qp->sumem)
                        ib_umem_release(qp->sumem);
        }

        pvrdma_page_dir_cleanup(dev, &qp->pdir);

        kfree(qp);

        atomic_dec(&dev->num_qps);
}

/**
 * pvrdma_destroy_qp - destroy a queue pair
 * @qp: the queue pair to destroy
 * @udata: user data or null for kernel object
 *
 * @return: 0 on success.
 */
int pvrdma_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
        struct pvrdma_qp *vqp = to_vqp(qp);
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
        int ret;

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
        cmd->qp_handle = vqp->qp_handle;

        ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
        if (ret < 0)
                dev_warn(&to_vdev(qp->device)->pdev->dev,
                         "destroy queuepair failed, error: %d\n", ret);

        pvrdma_free_qp(vqp);

        return 0;
}

/**
 * pvrdma_modify_qp - modify queue pair attributes
 * @ibqp: the queue pair
 * @attr: the new queue pair's attributes
 * @attr_mask: attributes mask
 * @udata: user data
 *
 * @returns 0 on success, otherwise returns an errno.
 */
int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_udata *udata)
{
        struct pvrdma_dev *dev = to_vdev(ibqp->device);
        struct pvrdma_qp *qp = to_vqp(ibqp);
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp;
        enum ib_qp_state cur_state, next_state;
        int ret;

        /* Sanity checking. Done under the QP mutex. */
        mutex_lock(&qp->mutex);
        cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
                qp->state;
        next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
                                attr_mask)) {
                ret = -EINVAL;
                goto out;
        }

        if (attr_mask & IB_QP_PORT) {
                if (attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt) {
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
                if (attr->min_rnr_timer > 31) {
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                if (attr->pkey_index >= dev->dsr->caps.max_pkeys) {
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (cur_state == next_state && cur_state == IB_QPS_RESET) {
                ret = 0;
                goto out;
        }

        qp->state = next_state;
        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP;
        cmd->qp_handle = qp->qp_handle;
        cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
        cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state);
        cmd->attrs.cur_qp_state =
                ib_qp_state_to_pvrdma(attr->cur_qp_state);
        cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu);
        cmd->attrs.path_mig_state =
                ib_mig_state_to_pvrdma(attr->path_mig_state);
        cmd->attrs.qkey = attr->qkey;
        cmd->attrs.rq_psn = attr->rq_psn;
        cmd->attrs.sq_psn = attr->sq_psn;
        cmd->attrs.dest_qp_num = attr->dest_qp_num;
        cmd->attrs.qp_access_flags =
                ib_access_flags_to_pvrdma(attr->qp_access_flags);
        cmd->attrs.pkey_index = attr->pkey_index;
        cmd->attrs.alt_pkey_index = attr->alt_pkey_index;
        cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify;
        cmd->attrs.sq_draining = attr->sq_draining;
        cmd->attrs.max_rd_atomic = attr->max_rd_atomic;
        cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic;
        cmd->attrs.min_rnr_timer = attr->min_rnr_timer;
        cmd->attrs.port_num = attr->port_num;
        cmd->attrs.timeout = attr->timeout;
        cmd->attrs.retry_cnt = attr->retry_cnt;
        cmd->attrs.rnr_retry = attr->rnr_retry;
        cmd->attrs.alt_port_num = attr->alt_port_num;
        cmd->attrs.alt_timeout = attr->alt_timeout;
        ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap);
        rdma_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr);
        rdma_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr);

        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_MODIFY_QP_RESP);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not modify queuepair, error: %d\n", ret);
        } else if (rsp.hdr.err > 0) {
                dev_warn(&dev->pdev->dev,
                         "cannot modify queuepair, error: %d\n", rsp.hdr.err);
                ret = -EINVAL;
        }

        if (ret == 0 && next_state == IB_QPS_RESET)
                pvrdma_reset_qp(qp);

out:
        mutex_unlock(&qp->mutex);

        return ret;
}

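/*
 * Translate a ring slot index into a pointer inside the QP's page
 * directory, using the queue's base offset plus n WQE-sized strides.
 */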
static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
        return pvrdma_page_dir_get_ptr(&qp->pdir,
                                       qp->sq.offset + n * qp->sq.wqe_size);
}

static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
{
        return pvrdma_page_dir_get_ptr(&qp->pdir,
                                       qp->rq.offset + n * qp->rq.wqe_size);
}

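/*
 * Fill in the fast-register (IB_WR_REG_MR) fields of a send WQE from the
 * memory region in @wr, and write the MR's page list into its page
 * directory so the device can reference it via pl_pdir_dma.
 */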
static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr,
                       const struct ib_reg_wr *wr)
{
        struct pvrdma_user_mr *mr = to_vmr(wr->mr);

        wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
        wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
        wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
        wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
        wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
        wqe_hdr->wr.fast_reg.access_flags = wr->access;
        wqe_hdr->wr.fast_reg.rkey = wr->key;

        return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages,
                                                mr->npages);
}

/**
 * pvrdma_post_send - post send work request entries on a QP
 * @ibqp: the QP
 * @wr: work request list to post
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise errno returned.
 */
int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                     const struct ib_send_wr **bad_wr)
{
        struct pvrdma_qp *qp = to_vqp(ibqp);
        struct pvrdma_dev *dev = to_vdev(ibqp->device);
        unsigned long flags;
        struct pvrdma_sq_wqe_hdr *wqe_hdr;
        struct pvrdma_sge *sge;
        int i, ret;

        /*
         * In states lower than RTS, we can fail immediately. In other states,
         * just post and let the device figure it out.
         */
        if (qp->state < IB_QPS_RTS) {
                *bad_wr = wr;
                return -EINVAL;
        }

        spin_lock_irqsave(&qp->sq.lock, flags);

        while (wr) {
                unsigned int tail = 0;

                if (unlikely(!pvrdma_idx_ring_has_space(
                                qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
                        dev_warn_ratelimited(&dev->pdev->dev,
                                             "send queue is full\n");
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto out;
                }

                if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
                        dev_warn_ratelimited(&dev->pdev->dev,
                                             "send SGE overflow\n");
                        *bad_wr = wr;
                        ret = -EINVAL;
                        goto out;
                }

                if (unlikely(wr->opcode < 0)) {
                        dev_warn_ratelimited(&dev->pdev->dev,
                                             "invalid send opcode\n");
                        *bad_wr = wr;
                        ret = -EINVAL;
                        goto out;
                }

                /*
                 * Only support UD, RC.
                 * Need to check opcode table for thorough checking.
                 * opcode               _UD     _UC     _RC
                 * _SEND                x       x       x
                 * _SEND_WITH_IMM       x       x       x
                 * _RDMA_WRITE                  x       x
                 * _RDMA_WRITE_WITH_IMM         x       x
                 * _LOCAL_INV                   x       x
                 * _SEND_WITH_INV               x       x
                 * _RDMA_READ                           x
                 * _ATOMIC_CMP_AND_SWP                  x
                 * _ATOMIC_FETCH_AND_ADD                x
                 * _MASK_ATOMIC_CMP_AND_SWP             x
                 * _MASK_ATOMIC_FETCH_AND_ADD           x
                 * _REG_MR                              x
                 *
                 */
                if (qp->ibqp.qp_type != IB_QPT_UD &&
                    qp->ibqp.qp_type != IB_QPT_RC &&
                    wr->opcode != IB_WR_SEND) {
                        dev_warn_ratelimited(&dev->pdev->dev,
                                             "unsupported queuepair type\n");
                        *bad_wr = wr;
                        ret = -EINVAL;
                        goto out;
                } else if (qp->ibqp.qp_type == IB_QPT_UD ||
                           qp->ibqp.qp_type == IB_QPT_GSI) {
                        if (wr->opcode != IB_WR_SEND &&
                            wr->opcode != IB_WR_SEND_WITH_IMM) {
                                dev_warn_ratelimited(&dev->pdev->dev,
                                                     "invalid send opcode\n");
                                *bad_wr = wr;
                                ret = -EINVAL;
                                goto out;
                        }
                }

                wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
                memset(wqe_hdr, 0, sizeof(*wqe_hdr));
                wqe_hdr->wr_id = wr->wr_id;
                wqe_hdr->num_sge = wr->num_sge;
                wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
                wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags);
                if (wr->opcode == IB_WR_SEND_WITH_IMM ||
                    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                        wqe_hdr->ex.imm_data = wr->ex.imm_data;

                if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
                        *bad_wr = wr;
                        ret = -EINVAL;
                        goto out;
                }

                switch (qp->ibqp.qp_type) {
                case IB_QPT_GSI:
                case IB_QPT_UD:
                        if (unlikely(!ud_wr(wr)->ah)) {
                                dev_warn_ratelimited(&dev->pdev->dev,
                                                     "invalid address handle\n");
                                *bad_wr = wr;
                                ret = -EINVAL;
                                goto out;
                        }

                        /*
                         * Use qkey from qp context if high order bit set,
                         * otherwise from work request.
                         */
                        wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn;
                        wqe_hdr->wr.ud.remote_qkey =
                                ud_wr(wr)->remote_qkey & 0x80000000 ?
                                qp->qkey : ud_wr(wr)->remote_qkey;
                        wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av;

                        break;
                case IB_QPT_RC:
                        switch (wr->opcode) {
                        case IB_WR_RDMA_READ:
                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                wqe_hdr->wr.rdma.remote_addr =
                                        rdma_wr(wr)->remote_addr;
                                wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
                                break;
                        case IB_WR_LOCAL_INV:
                        case IB_WR_SEND_WITH_INV:
                                wqe_hdr->ex.invalidate_rkey =
                                        wr->ex.invalidate_rkey;
                                break;
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                                wqe_hdr->wr.atomic.remote_addr =
                                        atomic_wr(wr)->remote_addr;
                                wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey;
                                wqe_hdr->wr.atomic.compare_add =
                                        atomic_wr(wr)->compare_add;
                                if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP)
                                        wqe_hdr->wr.atomic.swap =
                                                atomic_wr(wr)->swap;
                                break;
                        case IB_WR_REG_MR:
                                ret = set_reg_seg(wqe_hdr, reg_wr(wr));
                                if (ret < 0) {
                                        dev_warn_ratelimited(&dev->pdev->dev,
                                                             "Failed to set fast register work request\n");
                                        *bad_wr = wr;
                                        goto out;
                                }
                                break;
                        default:
                                break;
                        }

                        break;
                default:
                        dev_warn_ratelimited(&dev->pdev->dev,
                                             "invalid queuepair type\n");
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

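                /* The scatter/gather list follows the WQE header in the slot. */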
                sge = (struct pvrdma_sge *)(wqe_hdr + 1);
                for (i = 0; i < wr->num_sge; i++) {
                        /* Need to check wqe_size 0 or max size */
                        sge->addr = wr->sg_list[i].addr;
                        sge->length = wr->sg_list[i].length;
                        sge->lkey = wr->sg_list[i].lkey;
                        sge++;
                }

                /* Make sure wqe is written before index update */
                smp_wmb();

                /* Update shared sq ring */
                pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
                                    qp->sq.wqe_cnt);

                wr = wr->next;
        }

        ret = 0;

out:
        spin_unlock_irqrestore(&qp->sq.lock, flags);

        if (!ret)
                pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);

        return ret;
}

/**
 * pvrdma_post_recv - post receive work request entries on a QP
 * @ibqp: the QP
 * @wr: the work request list to post
 * @bad_wr: the first bad WR returned
 *
 * @return: 0 on success, otherwise errno returned.
 */
int pvrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                     const struct ib_recv_wr **bad_wr)
{
        struct pvrdma_dev *dev = to_vdev(ibqp->device);
        unsigned long flags;
        struct pvrdma_qp *qp = to_vqp(ibqp);
        struct pvrdma_rq_wqe_hdr *wqe_hdr;
        struct pvrdma_sge *sge;
        int ret = 0;
        int i;

        /*
         * In the RESET state, we can fail immediately. For other states,
         * just post and let the device figure it out.
         */
        if (qp->state == IB_QPS_RESET) {
                *bad_wr = wr;
                return -EINVAL;
        }

        if (qp->srq) {
                dev_warn(&dev->pdev->dev, "QP associated with SRQ\n");
                *bad_wr = wr;
                return -EINVAL;
        }

        spin_lock_irqsave(&qp->rq.lock, flags);

        while (wr) {
                unsigned int tail = 0;

                if (unlikely(wr->num_sge > qp->rq.max_sg ||
                             wr->num_sge < 0)) {
                        ret = -EINVAL;
                        *bad_wr = wr;
                        dev_warn_ratelimited(&dev->pdev->dev,
                                             "recv SGE overflow\n");
                        goto out;
                }

                if (unlikely(!pvrdma_idx_ring_has_space(
                                qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        dev_warn_ratelimited(&dev->pdev->dev,
                                             "recv queue full\n");
                        goto out;
                }

                wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
                wqe_hdr->wr_id = wr->wr_id;
                wqe_hdr->num_sge = wr->num_sge;
                wqe_hdr->total_len = 0;

                sge = (struct pvrdma_sge *)(wqe_hdr + 1);
                for (i = 0; i < wr->num_sge; i++) {
                        sge->addr = wr->sg_list[i].addr;
                        sge->length = wr->sg_list[i].length;
                        sge->lkey = wr->sg_list[i].lkey;
                        sge++;
                }

                /* Make sure wqe is written before index update */
                smp_wmb();

                /* Update shared rq ring */
                pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
                                    qp->rq.wqe_cnt);

                wr = wr->next;
        }

        spin_unlock_irqrestore(&qp->rq.lock, flags);

        pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);

        return ret;

out:
        spin_unlock_irqrestore(&qp->rq.lock, flags);

        return ret;
}

/**
 * pvrdma_query_qp - query a queue pair's attributes
 * @ibqp: the queue pair to query
 * @attr: the queue pair's attributes
 * @attr_mask: attributes mask
 * @init_attr: initial queue pair attributes
 *
 * @returns 0 on success, otherwise returns an errno.
 */
int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct pvrdma_dev *dev = to_vdev(ibqp->device);
        struct pvrdma_qp *qp = to_vqp(ibqp);
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_query_qp *cmd = &req.query_qp;
        struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp;
        int ret = 0;

        mutex_lock(&qp->mutex);

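        /* A QP in RESET has nothing to query from the device. */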
        if (qp->state == IB_QPS_RESET) {
                attr->qp_state = IB_QPS_RESET;
                goto out;
        }

        memset(cmd, 0, sizeof(*cmd));
        cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP;
        cmd->qp_handle = qp->qp_handle;
        cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);

        ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not query queuepair, error: %d\n", ret);
                goto out;
        }

        attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state);
        attr->cur_qp_state =
                pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state);
        attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu);
        attr->path_mig_state =
                pvrdma_mig_state_to_ib(resp->attrs.path_mig_state);
        attr->qkey = resp->attrs.qkey;
        attr->rq_psn = resp->attrs.rq_psn;
        attr->sq_psn = resp->attrs.sq_psn;
        attr->dest_qp_num = resp->attrs.dest_qp_num;
        attr->qp_access_flags =
                pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags);
        attr->pkey_index = resp->attrs.pkey_index;
        attr->alt_pkey_index = resp->attrs.alt_pkey_index;
        attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify;
        attr->sq_draining = resp->attrs.sq_draining;
        attr->max_rd_atomic = resp->attrs.max_rd_atomic;
        attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic;
        attr->min_rnr_timer = resp->attrs.min_rnr_timer;
        attr->port_num = resp->attrs.port_num;
        attr->timeout = resp->attrs.timeout;
        attr->retry_cnt = resp->attrs.retry_cnt;
        attr->rnr_retry = resp->attrs.rnr_retry;
        attr->alt_port_num = resp->attrs.alt_port_num;
        attr->alt_timeout = resp->attrs.alt_timeout;
        pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap);
        pvrdma_ah_attr_to_rdma(&attr->ah_attr, &resp->attrs.ah_attr);
        pvrdma_ah_attr_to_rdma(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr);

        qp->state = attr->qp_state;

        ret = 0;

out:
        attr->cur_qp_state = attr->qp_state;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->xrcd = NULL;
        init_attr->cap = attr->cap;
        init_attr->sq_sig_type = 0;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->create_flags = 0;
        init_attr->port_num = qp->port;

        mutex_unlock(&qp->mutex);
        return ret;
}