drivers/infiniband/hw/cxgb4/qp.c
1 /*
2  * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <linux/module.h>
34
35 #include "iw_cxgb4.h"
36
37 static int db_delay_usecs = 1;
38 module_param(db_delay_usecs, int, 0644);
39 MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");
40
41 static int ocqp_support = 1;
42 module_param(ocqp_support, int, 0644);
43 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
44
45 int db_fc_threshold = 1000;
46 module_param(db_fc_threshold, int, 0644);
47 MODULE_PARM_DESC(db_fc_threshold,
48                  "QP count/threshold that triggers"
49                  " automatic db flow control mode (default = 1000)");
50
51 int db_coalescing_threshold;
52 module_param(db_coalescing_threshold, int, 0644);
53 MODULE_PARM_DESC(db_coalescing_threshold,
54                  "QP count/threshold that triggers"
55                  " disabling db coalescing (default = 0)");
56
57 static int max_fr_immd = T4_MAX_FR_IMMD;
58 module_param(max_fr_immd, int, 0644);
59 MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
60
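/*
 * Reserve 'ird' incoming RDMA read resources from the device-wide pool
 * under dev->lock.  Fails with -ENOMEM once avail_ird is exhausted;
 * free_ird() below returns a reservation to the pool.
 */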
61 static int alloc_ird(struct c4iw_dev *dev, u32 ird)
62 {
63         int ret = 0;
64
65         spin_lock_irq(&dev->lock);
66         if (ird <= dev->avail_ird)
67                 dev->avail_ird -= ird;
68         else
69                 ret = -ENOMEM;
70         spin_unlock_irq(&dev->lock);
71
72         if (ret)
73                 dev_warn(&dev->rdev.lldi.pdev->dev,
74                          "device IRD resources exhausted\n");
75
76         return ret;
77 }
78
79 static void free_ird(struct c4iw_dev *dev, int ird)
80 {
81         spin_lock_irq(&dev->lock);
82         dev->avail_ird += ird;
83         spin_unlock_irq(&dev->lock);
84 }
85
86 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
87 {
88         unsigned long flag;
89         spin_lock_irqsave(&qhp->lock, flag);
90         qhp->attr.state = state;
91         spin_unlock_irqrestore(&qhp->lock, flag);
92 }
93
94 static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
95 {
96         c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
97 }
98
99 static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
100 {
101         dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
102                           dma_unmap_addr(sq, mapping));
103 }
104
105 static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
106 {
107         if (t4_sq_onchip(sq))
108                 dealloc_oc_sq(rdev, sq);
109         else
110                 dealloc_host_sq(rdev, sq);
111 }
112
113 static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
114 {
115         if (!ocqp_support || !ocqp_supported(&rdev->lldi))
116                 return -ENOSYS;
117         sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
118         if (!sq->dma_addr)
119                 return -ENOMEM;
120         sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
121                         rdev->lldi.vr->ocq.start;
122         sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
123                                             rdev->lldi.vr->ocq.start);
124         sq->flags |= T4_SQ_ONCHIP;
125         return 0;
126 }
127
128 static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
129 {
130         sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
131                                        &(sq->dma_addr), GFP_KERNEL);
132         if (!sq->queue)
133                 return -ENOMEM;
134         sq->phys_addr = virt_to_phys(sq->queue);
135         dma_unmap_addr_set(sq, mapping, sq->dma_addr);
136         return 0;
137 }
138
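/*
 * Allocate SQ memory: user QPs first try the on-chip (OCQP) pool and fall
 * back to host DMA memory; kernel QPs always use host memory.
 */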
139 static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
140 {
141         int ret = -ENOSYS;
142         if (user)
143                 ret = alloc_oc_sq(rdev, sq);
144         if (ret)
145                 ret = alloc_host_sq(rdev, sq);
146         return ret;
147 }
148
149 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
150                       struct c4iw_dev_ucontext *uctx)
151 {
152         /*
153          * uP clears EQ contexts when the connection exits rdma mode,
154          * so no need to post a RESET WR for these EQs.
155          */
156         dma_free_coherent(&(rdev->lldi.pdev->dev),
157                           wq->rq.memsize, wq->rq.queue,
158                           dma_unmap_addr(&wq->rq, mapping));
159         dealloc_sq(rdev, &wq->sq);
160         c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
161         kfree(wq->rq.sw_rq);
162         kfree(wq->sq.sw_sq);
163         c4iw_put_qpid(rdev, wq->rq.qid, uctx);
164         c4iw_put_qpid(rdev, wq->sq.qid, uctx);
165         return 0;
166 }
167
168 /*
169  * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
170  * then this is a user mapping so compute the page-aligned physical address
171  * for mapping.
172  */
173 void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
174                               enum cxgb4_bar2_qtype qtype,
175                               unsigned int *pbar2_qid, u64 *pbar2_pa)
176 {
177         u64 bar2_qoffset;
178         int ret;
179
180         ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
181                                    pbar2_pa ? 1 : 0,
182                                    &bar2_qoffset, pbar2_qid);
183         if (ret)
184                 return NULL;
185
186         if (pbar2_pa)
187                 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
188
189         if (is_t4(rdev->lldi.adapter_type))
190                 return NULL;
191
192         return rdev->bar2_kva + bar2_qoffset;
193 }
194
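/*
 * Allocate the SQ/RQ qids, queue memory and hardware RQT, resolve the BAR2
 * doorbell/GTS addresses, then post a single FW_RI_RES_WR that writes both
 * egress queue contexts and wait for the firmware to complete it.
 */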
195 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
196                      struct t4_cq *rcq, struct t4_cq *scq,
197                      struct c4iw_dev_ucontext *uctx,
198                      struct c4iw_wr_wait *wr_waitp)
199 {
200         int user = (uctx != &rdev->uctx);
201         struct fw_ri_res_wr *res_wr;
202         struct fw_ri_res *res;
203         int wr_len;
204         struct sk_buff *skb;
205         int ret = 0;
206         int eqsize;
207
208         wq->sq.qid = c4iw_get_qpid(rdev, uctx);
209         if (!wq->sq.qid)
210                 return -ENOMEM;
211
212         wq->rq.qid = c4iw_get_qpid(rdev, uctx);
213         if (!wq->rq.qid) {
214                 ret = -ENOMEM;
215                 goto free_sq_qid;
216         }
217
218         if (!user) {
219                 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
220                                  GFP_KERNEL);
221                 if (!wq->sq.sw_sq) {
222                         ret = -ENOMEM;
223                         goto free_rq_qid;
224                 }
225
226                 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
227                                  GFP_KERNEL);
228                 if (!wq->rq.sw_rq) {
229                         ret = -ENOMEM;
230                         goto free_sw_sq;
231                 }
232         }
233
234         /*
235          * RQT must be a power of 2 and at least 16 deep.
236          */
237         wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
238         wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
239         if (!wq->rq.rqt_hwaddr) {
240                 ret = -ENOMEM;
241                 goto free_sw_rq;
242         }
243
244         ret = alloc_sq(rdev, &wq->sq, user);
245         if (ret)
246                 goto free_hwaddr;
247         memset(wq->sq.queue, 0, wq->sq.memsize);
248         dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
249
250         wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
251                                           wq->rq.memsize, &(wq->rq.dma_addr),
252                                           GFP_KERNEL);
253         if (!wq->rq.queue) {
254                 ret = -ENOMEM;
255                 goto free_sq;
256         }
257         pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
258                  wq->sq.queue,
259                  (unsigned long long)virt_to_phys(wq->sq.queue),
260                  wq->rq.queue,
261                  (unsigned long long)virt_to_phys(wq->rq.queue));
262         memset(wq->rq.queue, 0, wq->rq.memsize);
263         dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
264
265         wq->db = rdev->lldi.db_reg;
266
267         wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
268                                          &wq->sq.bar2_qid,
269                                          user ? &wq->sq.bar2_pa : NULL);
270         wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS,
271                                          &wq->rq.bar2_qid,
272                                          user ? &wq->rq.bar2_pa : NULL);
273
274         /*
275          * User mode must have bar2 access.
276          */
277         if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
278                 pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
279                         pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
                ret = -EINVAL;
280                 goto free_dma;
281         }
282
283         wq->rdev = rdev;
284         wq->rq.msn = 1;
285
286         /* build fw_ri_res_wr */
287         wr_len = sizeof *res_wr + 2 * sizeof *res;
288
289         skb = alloc_skb(wr_len, GFP_KERNEL);
290         if (!skb) {
291                 ret = -ENOMEM;
292                 goto free_dma;
293         }
294         set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
295
296         res_wr = __skb_put_zero(skb, wr_len);
297         res_wr->op_nres = cpu_to_be32(
298                         FW_WR_OP_V(FW_RI_RES_WR) |
299                         FW_RI_RES_WR_NRES_V(2) |
300                         FW_WR_COMPL_F);
301         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
302         res_wr->cookie = (uintptr_t)wr_waitp;
303         res = res_wr->res;
304         res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
305         res->u.sqrq.op = FW_RI_RES_OP_WRITE;
306
307         /*
308          * eqsize is the number of 64B entries plus the status page size.
309          */
310         eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
311                 rdev->hw_queue.t4_eq_status_entries;
312
313         res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
314                 FW_RI_RES_WR_HOSTFCMODE_V(0) |  /* no host cidx updates */
315                 FW_RI_RES_WR_CPRIO_V(0) |       /* don't keep in chip cache */
316                 FW_RI_RES_WR_PCIECHN_V(0) |     /* set by uP at ri_init time */
317                 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
318                 FW_RI_RES_WR_IQID_V(scq->cqid));
319         res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
320                 FW_RI_RES_WR_DCAEN_V(0) |
321                 FW_RI_RES_WR_DCACPU_V(0) |
322                 FW_RI_RES_WR_FBMIN_V(2) |
323                 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
324                                          FW_RI_RES_WR_FBMAX_V(3)) |
325                 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
326                 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
327                 FW_RI_RES_WR_EQSIZE_V(eqsize));
328         res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
329         res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
330         res++;
331         res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
332         res->u.sqrq.op = FW_RI_RES_OP_WRITE;
333
334         /*
335          * eqsize is the number of 64B entries plus the status page size.
336          */
337         eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
338                 rdev->hw_queue.t4_eq_status_entries;
339         res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
340                 FW_RI_RES_WR_HOSTFCMODE_V(0) |  /* no host cidx updates */
341                 FW_RI_RES_WR_CPRIO_V(0) |       /* don't keep in chip cache */
342                 FW_RI_RES_WR_PCIECHN_V(0) |     /* set by uP at ri_init time */
343                 FW_RI_RES_WR_IQID_V(rcq->cqid));
344         res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
345                 FW_RI_RES_WR_DCAEN_V(0) |
346                 FW_RI_RES_WR_DCACPU_V(0) |
347                 FW_RI_RES_WR_FBMIN_V(2) |
348                 FW_RI_RES_WR_FBMAX_V(3) |
349                 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
350                 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
351                 FW_RI_RES_WR_EQSIZE_V(eqsize));
352         res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
353         res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
354
355         c4iw_init_wr_wait(wr_waitp);
356         ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
357         if (ret)
358                 goto free_dma;
359
360         pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
361                  wq->sq.qid, wq->rq.qid, wq->db,
362                  wq->sq.bar2_va, wq->rq.bar2_va);
363
364         return 0;
365 free_dma:
366         dma_free_coherent(&(rdev->lldi.pdev->dev),
367                           wq->rq.memsize, wq->rq.queue,
368                           dma_unmap_addr(&wq->rq, mapping));
369 free_sq:
370         dealloc_sq(rdev, &wq->sq);
371 free_hwaddr:
372         c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
373 free_sw_rq:
374         kfree(wq->rq.sw_rq);
375 free_sw_sq:
376         kfree(wq->sq.sw_sq);
377 free_rq_qid:
378         c4iw_put_qpid(rdev, wq->rq.qid, uctx);
379 free_sq_qid:
380         c4iw_put_qpid(rdev, wq->sq.qid, uctx);
381         return ret;
382 }
383
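/*
 * Copy the WR's scatter/gather payload into the WQE as immediate data,
 * wrapping at the end of the SQ and zero-padding up to the next 16-byte
 * boundary.  Returns -EMSGSIZE if the payload exceeds 'max'.
 */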
384 static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
385                       struct ib_send_wr *wr, int max, u32 *plenp)
386 {
387         u8 *dstp, *srcp;
388         u32 plen = 0;
389         int i;
390         int rem, len;
391
392         dstp = (u8 *)immdp->data;
393         for (i = 0; i < wr->num_sge; i++) {
394                 if ((plen + wr->sg_list[i].length) > max)
395                         return -EMSGSIZE;
396                 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
397                 plen += wr->sg_list[i].length;
398                 rem = wr->sg_list[i].length;
399                 while (rem) {
400                         if (dstp == (u8 *)&sq->queue[sq->size])
401                                 dstp = (u8 *)sq->queue;
402                         if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
403                                 len = rem;
404                         else
405                                 len = (u8 *)&sq->queue[sq->size] - dstp;
406                         memcpy(dstp, srcp, len);
407                         dstp += len;
408                         srcp += len;
409                         rem -= len;
410                 }
411         }
412         len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
413         if (len)
414                 memset(dstp, 0, len);
415         immdp->op = FW_RI_DATA_IMMD;
416         immdp->r1 = 0;
417         immdp->r2 = 0;
418         immdp->immdlen = cpu_to_be32(plen);
419         *plenp = plen;
420         return 0;
421 }
422
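/*
 * Build a firmware immediate SGL: one (lkey, length, address) triple per
 * SGE, wrapping at the end of the queue.  The total payload length is
 * returned through plenp when the caller asks for it.
 */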
423 static int build_isgl(__be64 *queue_start, __be64 *queue_end,
424                       struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
425                       int num_sge, u32 *plenp)
427 {
428         int i;
429         u32 plen = 0;
430         __be64 *flitp = (__be64 *)isglp->sge;
431
432         for (i = 0; i < num_sge; i++) {
433                 if ((plen + sg_list[i].length) < plen)
434                         return -EMSGSIZE;
435                 plen += sg_list[i].length;
436                 *flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
437                                      sg_list[i].length);
438                 if (++flitp == queue_end)
439                         flitp = queue_start;
440                 *flitp = cpu_to_be64(sg_list[i].addr);
441                 if (++flitp == queue_end)
442                         flitp = queue_start;
443         }
444         *flitp = (__force __be64)0;
445         isglp->op = FW_RI_DATA_ISGL;
446         isglp->r1 = 0;
447         isglp->nsge = cpu_to_be16(num_sge);
448         isglp->r2 = 0;
449         if (plenp)
450                 *plenp = plen;
451         return 0;
452 }
453
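/* Build a FW_RI_SEND_WR payload from either inline data or an immediate SGL. */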
454 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
455                            struct ib_send_wr *wr, u8 *len16)
456 {
457         u32 plen;
458         int size;
459         int ret;
460
461         if (wr->num_sge > T4_MAX_SEND_SGE)
462                 return -EINVAL;
463         switch (wr->opcode) {
464         case IB_WR_SEND:
465                 if (wr->send_flags & IB_SEND_SOLICITED)
466                         wqe->send.sendop_pkd = cpu_to_be32(
467                                 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
468                 else
469                         wqe->send.sendop_pkd = cpu_to_be32(
470                                 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
471                 wqe->send.stag_inv = 0;
472                 break;
473         case IB_WR_SEND_WITH_INV:
474                 if (wr->send_flags & IB_SEND_SOLICITED)
475                         wqe->send.sendop_pkd = cpu_to_be32(
476                                 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
477                 else
478                         wqe->send.sendop_pkd = cpu_to_be32(
479                                 FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
480                 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
481                 break;
482
483         default:
484                 return -EINVAL;
485         }
486         wqe->send.r3 = 0;
487         wqe->send.r4 = 0;
488
489         plen = 0;
490         if (wr->num_sge) {
491                 if (wr->send_flags & IB_SEND_INLINE) {
492                         ret = build_immd(sq, wqe->send.u.immd_src, wr,
493                                          T4_MAX_SEND_INLINE, &plen);
494                         if (ret)
495                                 return ret;
496                         size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
497                                plen;
498                 } else {
499                         ret = build_isgl((__be64 *)sq->queue,
500                                          (__be64 *)&sq->queue[sq->size],
501                                          wqe->send.u.isgl_src,
502                                          wr->sg_list, wr->num_sge, &plen);
503                         if (ret)
504                                 return ret;
505                         size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
506                                wr->num_sge * sizeof(struct fw_ri_sge);
507                 }
508         } else {
509                 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
510                 wqe->send.u.immd_src[0].r1 = 0;
511                 wqe->send.u.immd_src[0].r2 = 0;
512                 wqe->send.u.immd_src[0].immdlen = 0;
513                 size = sizeof wqe->send + sizeof(struct fw_ri_immd);
514                 plen = 0;
515         }
516         *len16 = DIV_ROUND_UP(size, 16);
517         wqe->send.plen = cpu_to_be32(plen);
518         return 0;
519 }
520
521 static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
522                             struct ib_send_wr *wr, u8 *len16)
523 {
524         u32 plen;
525         int size;
526         int ret;
527
528         if (wr->num_sge > T4_MAX_SEND_SGE)
529                 return -EINVAL;
530         wqe->write.r2 = 0;
531         wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
532         wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
533         if (wr->num_sge) {
534                 if (wr->send_flags & IB_SEND_INLINE) {
535                         ret = build_immd(sq, wqe->write.u.immd_src, wr,
536                                          T4_MAX_WRITE_INLINE, &plen);
537                         if (ret)
538                                 return ret;
539                         size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
540                                plen;
541                 } else {
542                         ret = build_isgl((__be64 *)sq->queue,
543                                          (__be64 *)&sq->queue[sq->size],
544                                          wqe->write.u.isgl_src,
545                                          wr->sg_list, wr->num_sge, &plen);
546                         if (ret)
547                                 return ret;
548                         size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
549                                wr->num_sge * sizeof(struct fw_ri_sge);
550                 }
551         } else {
552                 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
553                 wqe->write.u.immd_src[0].r1 = 0;
554                 wqe->write.u.immd_src[0].r2 = 0;
555                 wqe->write.u.immd_src[0].immdlen = 0;
556                 size = sizeof wqe->write + sizeof(struct fw_ri_immd);
557                 plen = 0;
558         }
559         *len16 = DIV_ROUND_UP(size, 16);
560         wqe->write.plen = cpu_to_be32(plen);
561         return 0;
562 }
563
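/*
 * Build a FW_RI_RDMA_READ_WR.  Zero-length reads still carry stag/address
 * fields; the hard-coded stag value 2 appears to be a reserved stag used
 * only for these 0B reads.
 */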
564 static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
565 {
566         if (wr->num_sge > 1)
567                 return -EINVAL;
568         if (wr->num_sge && wr->sg_list[0].length) {
569                 wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
570                 wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
571                                                         >> 32));
572                 wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
573                 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
574                 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
575                 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
576                                                          >> 32));
577                 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
578         } else {
579                 wqe->read.stag_src = cpu_to_be32(2);
580                 wqe->read.to_src_hi = 0;
581                 wqe->read.to_src_lo = 0;
582                 wqe->read.stag_sink = cpu_to_be32(2);
583                 wqe->read.plen = 0;
584                 wqe->read.to_sink_hi = 0;
585                 wqe->read.to_sink_lo = 0;
586         }
587         wqe->read.r2 = 0;
588         wqe->read.r5 = 0;
589         *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
590         return 0;
591 }
592
593 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
594                            struct ib_recv_wr *wr, u8 *len16)
595 {
596         int ret;
597
598         ret = build_isgl((__be64 *)qhp->wq.rq.queue,
599                          (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
600                          &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
601         if (ret)
602                 return ret;
603         *len16 = DIV_ROUND_UP(sizeof wqe->recv +
604                               wr->num_sge * sizeof(struct fw_ri_sge), 16);
605         return 0;
606 }
607
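/*
 * Build a FW_RI_FR_NSMR_TPTE_WR, which carries the TPTE and (at most two)
 * PBL entries directly in the work request.  Only used when the LLD reports
 * fr_nsmr_tpte_wr_support and the MR maps no more than two pages (see
 * c4iw_post_send()).
 */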
608 static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
609                               struct ib_reg_wr *wr, struct c4iw_mr *mhp,
610                               u8 *len16)
611 {
612         __be64 *p = (__be64 *)fr->pbl;
613
614         fr->r2 = cpu_to_be32(0);
615         fr->stag = cpu_to_be32(mhp->ibmr.rkey);
616
617         fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
618                 FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
619                 FW_RI_TPTE_STAGSTATE_V(1) |
620                 FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
621                 FW_RI_TPTE_PDID_V(mhp->attr.pdid));
622         fr->tpte.locread_to_qpid = cpu_to_be32(
623                 FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
624                 FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
625                 FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
626         fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
627                 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3));
628         fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
629         fr->tpte.len_hi = cpu_to_be32(0);
630         fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
631         fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
632         fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
633
634         p[0] = cpu_to_be64((u64)mhp->mpl[0]);
635         p[1] = cpu_to_be64((u64)mhp->mpl[1]);
636
637         *len16 = DIV_ROUND_UP(sizeof(*fr), 16);
638 }
639
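/*
 * Build a FW_RI_FR_NSMR_WR for fast memory registration.  The PBL is either
 * fetched by the hardware via a DSGL (when supported and larger than
 * max_fr_immd) or copied into the SQ as immediate data.
 */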
640 static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
641                         struct ib_reg_wr *wr, struct c4iw_mr *mhp, u8 *len16,
642                         bool dsgl_supported)
643 {
644         struct fw_ri_immd *imdp;
645         __be64 *p;
646         int i;
647         int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
648         int rem;
649
650         if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
651                 return -EINVAL;
652
653         wqe->fr.qpbinde_to_dcacpu = 0;
654         wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
655         wqe->fr.addr_type = FW_RI_VA_BASED_TO;
656         wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
657         wqe->fr.len_hi = 0;
658         wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
659         wqe->fr.stag = cpu_to_be32(wr->key);
660         wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
661         wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
662                                         0xffffffff);
663
664         if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
665                 struct fw_ri_dsgl *sglp;
666
667                 for (i = 0; i < mhp->mpl_len; i++)
668                         mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);
669
670                 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
671                 sglp->op = FW_RI_DATA_DSGL;
672                 sglp->r1 = 0;
673                 sglp->nsge = cpu_to_be16(1);
674                 sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
675                 sglp->len0 = cpu_to_be32(pbllen);
676
677                 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
678         } else {
679                 imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
680                 imdp->op = FW_RI_DATA_IMMD;
681                 imdp->r1 = 0;
682                 imdp->r2 = 0;
683                 imdp->immdlen = cpu_to_be32(pbllen);
684                 p = (__be64 *)(imdp + 1);
685                 rem = pbllen;
686                 for (i = 0; i < mhp->mpl_len; i++) {
687                         *p = cpu_to_be64((u64)mhp->mpl[i]);
688                         rem -= sizeof(*p);
689                         if (++p == (__be64 *)&sq->queue[sq->size])
690                                 p = (__be64 *)sq->queue;
691                 }
692                 while (rem) {
693                         *p = 0;
694                         rem -= sizeof(*p);
695                         if (++p == (__be64 *)&sq->queue[sq->size])
696                                 p = (__be64 *)sq->queue;
697                 }
698                 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
699                                       + pbllen, 16);
700         }
701         return 0;
702 }
703
704 static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
705 {
706         wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
707         wqe->inv.r2 = 0;
708         *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
709         return 0;
710 }
711
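/*
 * Final QP teardown, run from the rdev's free_workq.  destroy_qp() cannot
 * be called from atomic context, and the last reference may be dropped from
 * such a context, so queue_qp_free() defers the work here instead of
 * freeing inline.
 */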
712 static void free_qp_work(struct work_struct *work)
713 {
714         struct c4iw_ucontext *ucontext;
715         struct c4iw_qp *qhp;
716         struct c4iw_dev *rhp;
717
718         qhp = container_of(work, struct c4iw_qp, free_work);
719         ucontext = qhp->ucontext;
720         rhp = qhp->rhp;
721
722         pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
723         destroy_qp(&rhp->rdev, &qhp->wq,
724                    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
725
726         if (ucontext)
727                 c4iw_put_ucontext(ucontext);
728         c4iw_put_wr_wait(qhp->wr_waitp);
729         kfree(qhp);
730 }
731
732 static void queue_qp_free(struct kref *kref)
733 {
734         struct c4iw_qp *qhp;
735
736         qhp = container_of(kref, struct c4iw_qp, kref);
737         pr_debug("qhp %p\n", qhp);
738         queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
739 }
740
741 void c4iw_qp_add_ref(struct ib_qp *qp)
742 {
743         pr_debug("ib_qp %p\n", qp);
744         kref_get(&to_c4iw_qp(qp)->kref);
745 }
746
747 void c4iw_qp_rem_ref(struct ib_qp *qp)
748 {
749         pr_debug("ib_qp %p\n", qp);
750         kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
751 }
752
753 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
754 {
755         if (list_empty(entry))
756                 list_add_tail(entry, head);
757 }
758
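/*
 * Doorbell flow control for kernel QPs: in the NORMAL db_state ring the
 * doorbell immediately; otherwise accumulate the pidx increment and queue
 * the QP on the device's db_fc_list so the doorbell can be replayed later.
 */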
759 static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
760 {
761         unsigned long flags;
762
763         spin_lock_irqsave(&qhp->rhp->lock, flags);
764         spin_lock(&qhp->lock);
765         if (qhp->rhp->db_state == NORMAL)
766                 t4_ring_sq_db(&qhp->wq, inc, NULL);
767         else {
768                 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
769                 qhp->wq.sq.wq_pidx_inc += inc;
770         }
771         spin_unlock(&qhp->lock);
772         spin_unlock_irqrestore(&qhp->rhp->lock, flags);
773         return 0;
774 }
775
776 static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
777 {
778         unsigned long flags;
779
780         spin_lock_irqsave(&qhp->rhp->lock, flags);
781         spin_lock(&qhp->lock);
782         if (qhp->rhp->db_state == NORMAL)
783                 t4_ring_rq_db(&qhp->wq, inc, NULL);
784         else {
785                 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
786                 qhp->wq.rq.wq_pidx_inc += inc;
787         }
788         spin_unlock(&qhp->lock);
789         spin_unlock_irqrestore(&qhp->rhp->lock, flags);
790         return 0;
791 }
792
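/*
 * The QP is already flushed, so rather than posting to hardware, synthesize
 * a software CQE (T4_ERR_SWFLUSH status, drain opcode) carrying the wr_id
 * and kick the CQ's completion handler if the CQ is armed.
 */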
793 static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
794 {
795         struct t4_cqe cqe = {};
796         struct c4iw_cq *schp;
797         unsigned long flag;
798         struct t4_cq *cq;
799
800         schp = to_c4iw_cq(qhp->ibqp.send_cq);
801         cq = &schp->cq;
802
803         cqe.u.drain_cookie = wr->wr_id;
804         cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
805                                  CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
806                                  CQE_TYPE_V(1) |
807                                  CQE_SWCQE_V(1) |
808                                  CQE_QPID_V(qhp->wq.sq.qid));
809
810         spin_lock_irqsave(&schp->lock, flag);
811         cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
812         cq->sw_queue[cq->sw_pidx] = cqe;
813         t4_swcq_produce(cq);
814         spin_unlock_irqrestore(&schp->lock, flag);
815
816         if (t4_clear_cq_armed(&schp->cq)) {
817                 spin_lock_irqsave(&schp->comp_handler_lock, flag);
818                 (*schp->ibcq.comp_handler)(&schp->ibcq,
819                                            schp->ibcq.cq_context);
820                 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
821         }
822 }
823
824 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
825 {
826         struct t4_cqe cqe = {};
827         struct c4iw_cq *rchp;
828         unsigned long flag;
829         struct t4_cq *cq;
830
831         rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
832         cq = &rchp->cq;
833
834         cqe.u.drain_cookie = wr->wr_id;
835         cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
836                                  CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
837                                  CQE_TYPE_V(0) |
838                                  CQE_SWCQE_V(1) |
839                                  CQE_QPID_V(qhp->wq.sq.qid));
840
841         spin_lock_irqsave(&rchp->lock, flag);
842         cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
843         cq->sw_queue[cq->sw_pidx] = cqe;
844         t4_swcq_produce(cq);
845         spin_unlock_irqrestore(&rchp->lock, flag);
846
847         if (t4_clear_cq_armed(&rchp->cq)) {
848                 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
849                 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
850                                            rchp->ibcq.cq_context);
851                 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
852         }
853 }
854
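/*
 * post_send verb: translate each WR in the chain into a firmware WQE
 * (send, write, read, fastreg or local invalidate), record the software SQ
 * entry, then ring the SQ doorbell directly or via the flow-control path.
 */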
855 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
856                    struct ib_send_wr **bad_wr)
857 {
858         int err = 0;
859         u8 len16 = 0;
860         enum fw_wr_opcodes fw_opcode = 0;
861         enum fw_ri_wr_flags fw_flags;
862         struct c4iw_qp *qhp;
863         union t4_wr *wqe = NULL;
864         u32 num_wrs;
865         struct t4_swsqe *swsqe;
866         unsigned long flag;
867         u16 idx = 0;
868
869         qhp = to_c4iw_qp(ibqp);
870         spin_lock_irqsave(&qhp->lock, flag);
871
872         /*
873          * If the qp has been flushed, then just insert a special
874          * drain cqe.
875          */
876         if (qhp->wq.flushed) {
877                 spin_unlock_irqrestore(&qhp->lock, flag);
878                 complete_sq_drain_wr(qhp, wr);
879                 return err;
880         }
881         num_wrs = t4_sq_avail(&qhp->wq);
882         if (num_wrs == 0) {
883                 spin_unlock_irqrestore(&qhp->lock, flag);
884                 *bad_wr = wr;
885                 return -ENOMEM;
886         }
887         while (wr) {
888                 if (num_wrs == 0) {
889                         err = -ENOMEM;
890                         *bad_wr = wr;
891                         break;
892                 }
893                 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
894                       qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
895
896                 fw_flags = 0;
897                 if (wr->send_flags & IB_SEND_SOLICITED)
898                         fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
899                 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
900                         fw_flags |= FW_RI_COMPLETION_FLAG;
901                 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
902                 switch (wr->opcode) {
903                 case IB_WR_SEND_WITH_INV:
904                 case IB_WR_SEND:
905                         if (wr->send_flags & IB_SEND_FENCE)
906                                 fw_flags |= FW_RI_READ_FENCE_FLAG;
907                         fw_opcode = FW_RI_SEND_WR;
908                         if (wr->opcode == IB_WR_SEND)
909                                 swsqe->opcode = FW_RI_SEND;
910                         else
911                                 swsqe->opcode = FW_RI_SEND_WITH_INV;
912                         err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
913                         break;
914                 case IB_WR_RDMA_WRITE:
915                         fw_opcode = FW_RI_RDMA_WRITE_WR;
916                         swsqe->opcode = FW_RI_RDMA_WRITE;
917                         err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
918                         break;
919                 case IB_WR_RDMA_READ:
920                 case IB_WR_RDMA_READ_WITH_INV:
921                         fw_opcode = FW_RI_RDMA_READ_WR;
922                         swsqe->opcode = FW_RI_READ_REQ;
923                         if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
924                                 c4iw_invalidate_mr(qhp->rhp,
925                                                    wr->sg_list[0].lkey);
926                                 fw_flags = FW_RI_RDMA_READ_INVALIDATE;
927                         } else {
928                                 fw_flags = 0;
929                         }
930                         err = build_rdma_read(wqe, wr, &len16);
931                         if (err)
932                                 break;
933                         swsqe->read_len = wr->sg_list[0].length;
934                         if (!qhp->wq.sq.oldest_read)
935                                 qhp->wq.sq.oldest_read = swsqe;
936                         break;
937                 case IB_WR_REG_MR: {
938                         struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);
939
940                         swsqe->opcode = FW_RI_FAST_REGISTER;
941                         if (qhp->rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
942                             !mhp->attr.state && mhp->mpl_len <= 2) {
943                                 fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
944                                 build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
945                                                   mhp, &len16);
946                         } else {
947                                 fw_opcode = FW_RI_FR_NSMR_WR;
948                                 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
949                                        mhp, &len16,
950                                        qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
951                                 if (err)
952                                         break;
953                         }
954                         mhp->attr.state = 1;
955                         break;
956                 }
957                 case IB_WR_LOCAL_INV:
958                         if (wr->send_flags & IB_SEND_FENCE)
959                                 fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
960                         fw_opcode = FW_RI_INV_LSTAG_WR;
961                         swsqe->opcode = FW_RI_LOCAL_INV;
962                         err = build_inv_stag(wqe, wr, &len16);
963                         c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
964                         break;
965                 default:
966                         pr_warn("%s post of type=%d TBD!\n", __func__,
967                                 wr->opcode);
968                         err = -EINVAL;
969                 }
970                 if (err) {
971                         *bad_wr = wr;
972                         break;
973                 }
974                 swsqe->idx = qhp->wq.sq.pidx;
975                 swsqe->complete = 0;
976                 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
977                                   qhp->sq_sig_all;
978                 swsqe->flushed = 0;
979                 swsqe->wr_id = wr->wr_id;
980                 if (c4iw_wr_log) {
981                         swsqe->sge_ts = cxgb4_read_sge_timestamp(
982                                         qhp->rhp->rdev.lldi.ports[0]);
983                         getnstimeofday(&swsqe->host_ts);
984                 }
985
986                 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
987
988                 pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
989                          (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
990                          swsqe->opcode, swsqe->read_len);
991                 wr = wr->next;
992                 num_wrs--;
993                 t4_sq_produce(&qhp->wq, len16);
994                 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
995         }
996         if (!qhp->rhp->rdev.status_page->db_off) {
997                 t4_ring_sq_db(&qhp->wq, idx, wqe);
998                 spin_unlock_irqrestore(&qhp->lock, flag);
999         } else {
1000                 spin_unlock_irqrestore(&qhp->lock, flag);
1001                 ring_kernel_sq_db(qhp, idx);
1002         }
1003         return err;
1004 }
1005
1006 int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1007                       struct ib_recv_wr **bad_wr)
1008 {
1009         int err = 0;
1010         struct c4iw_qp *qhp;
1011         union t4_recv_wr *wqe = NULL;
1012         u32 num_wrs;
1013         u8 len16 = 0;
1014         unsigned long flag;
1015         u16 idx = 0;
1016
1017         qhp = to_c4iw_qp(ibqp);
1018         spin_lock_irqsave(&qhp->lock, flag);
1019
1020         /*
1021          * If the qp has been flushed, then just insert a special
1022          * drain cqe.
1023          */
1024         if (qhp->wq.flushed) {
1025                 spin_unlock_irqrestore(&qhp->lock, flag);
1026                 complete_rq_drain_wr(qhp, wr);
1027                 return err;
1028         }
1029         num_wrs = t4_rq_avail(&qhp->wq);
1030         if (num_wrs == 0) {
1031                 spin_unlock_irqrestore(&qhp->lock, flag);
1032                 *bad_wr = wr;
1033                 return -ENOMEM;
1034         }
1035         while (wr) {
1036                 if (wr->num_sge > T4_MAX_RECV_SGE) {
1037                         err = -EINVAL;
1038                         *bad_wr = wr;
1039                         break;
1040                 }
1041                 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
1042                                            qhp->wq.rq.wq_pidx *
1043                                            T4_EQ_ENTRY_SIZE);
1044                 if (num_wrs)
1045                         err = build_rdma_recv(qhp, wqe, wr, &len16);
1046                 else
1047                         err = -ENOMEM;
1048                 if (err) {
1049                         *bad_wr = wr;
1050                         break;
1051                 }
1052
1053                 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
1054                 if (c4iw_wr_log) {
1055                         qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
1056                                 cxgb4_read_sge_timestamp(
1057                                                 qhp->rhp->rdev.lldi.ports[0]);
1058                         getnstimeofday(
1059                                 &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
1060                 }
1061
1062                 wqe->recv.opcode = FW_RI_RECV_WR;
1063                 wqe->recv.r1 = 0;
1064                 wqe->recv.wrid = qhp->wq.rq.pidx;
1065                 wqe->recv.r2[0] = 0;
1066                 wqe->recv.r2[1] = 0;
1067                 wqe->recv.r2[2] = 0;
1068                 wqe->recv.len16 = len16;
1069                 pr_debug("cookie 0x%llx pidx %u\n",
1070                          (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
1071                 t4_rq_produce(&qhp->wq, len16);
1072                 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
1073                 wr = wr->next;
1074                 num_wrs--;
1075         }
1076         if (!qhp->rhp->rdev.status_page->db_off) {
1077                 t4_ring_rq_db(&qhp->wq, idx, wqe);
1078                 spin_unlock_irqrestore(&qhp->lock, flag);
1079         } else {
1080                 spin_unlock_irqrestore(&qhp->lock, flag);
1081                 ring_kernel_rq_db(qhp, idx);
1082         }
1083         return err;
1084 }
1085
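/*
 * Map the status of an error CQE onto the RDMAP/DDP/MPA layer, error type
 * and error code carried in a TERMINATE message.
 */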
1086 static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
1087                                     u8 *ecode)
1088 {
1089         int status;
1090         int tagged;
1091         int opcode;
1092         int rqtype;
1093         int send_inv;
1094
1095         if (!err_cqe) {
1096                 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1097                 *ecode = 0;
1098                 return;
1099         }
1100
1101         status = CQE_STATUS(err_cqe);
1102         opcode = CQE_OPCODE(err_cqe);
1103         rqtype = RQ_TYPE(err_cqe);
1104         send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
1105                    (opcode == FW_RI_SEND_WITH_SE_INV);
1106         tagged = (opcode == FW_RI_RDMA_WRITE) ||
1107                  (rqtype && (opcode == FW_RI_READ_RESP));
1108
1109         switch (status) {
1110         case T4_ERR_STAG:
1111                 if (send_inv) {
1112                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1113                         *ecode = RDMAP_CANT_INV_STAG;
1114                 } else {
1115                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1116                         *ecode = RDMAP_INV_STAG;
1117                 }
1118                 break;
1119         case T4_ERR_PDID:
1120                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1121                 if ((opcode == FW_RI_SEND_WITH_INV) ||
1122                     (opcode == FW_RI_SEND_WITH_SE_INV))
1123                         *ecode = RDMAP_CANT_INV_STAG;
1124                 else
1125                         *ecode = RDMAP_STAG_NOT_ASSOC;
1126                 break;
1127         case T4_ERR_QPID:
1128                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1129                 *ecode = RDMAP_STAG_NOT_ASSOC;
1130                 break;
1131         case T4_ERR_ACCESS:
1132                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1133                 *ecode = RDMAP_ACC_VIOL;
1134                 break;
1135         case T4_ERR_WRAP:
1136                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1137                 *ecode = RDMAP_TO_WRAP;
1138                 break;
1139         case T4_ERR_BOUND:
1140                 if (tagged) {
1141                         *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1142                         *ecode = DDPT_BASE_BOUNDS;
1143                 } else {
1144                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1145                         *ecode = RDMAP_BASE_BOUNDS;
1146                 }
1147                 break;
1148         case T4_ERR_INVALIDATE_SHARED_MR:
1149         case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
1150                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1151                 *ecode = RDMAP_CANT_INV_STAG;
1152                 break;
1153         case T4_ERR_ECC:
1154         case T4_ERR_ECC_PSTAG:
1155         case T4_ERR_INTERNAL_ERR:
1156                 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
1157                 *ecode = 0;
1158                 break;
1159         case T4_ERR_OUT_OF_RQE:
1160                 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1161                 *ecode = DDPU_INV_MSN_NOBUF;
1162                 break;
1163         case T4_ERR_PBL_ADDR_BOUND:
1164                 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1165                 *ecode = DDPT_BASE_BOUNDS;
1166                 break;
1167         case T4_ERR_CRC:
1168                 *layer_type = LAYER_MPA|DDP_LLP;
1169                 *ecode = MPA_CRC_ERR;
1170                 break;
1171         case T4_ERR_MARKER:
1172                 *layer_type = LAYER_MPA|DDP_LLP;
1173                 *ecode = MPA_MARKER_ERR;
1174                 break;
1175         case T4_ERR_PDU_LEN_ERR:
1176                 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1177                 *ecode = DDPU_MSG_TOOBIG;
1178                 break;
1179         case T4_ERR_DDP_VERSION:
1180                 if (tagged) {
1181                         *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1182                         *ecode = DDPT_INV_VERS;
1183                 } else {
1184                         *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1185                         *ecode = DDPU_INV_VERS;
1186                 }
1187                 break;
1188         case T4_ERR_RDMA_VERSION:
1189                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1190                 *ecode = RDMAP_INV_VERS;
1191                 break;
1192         case T4_ERR_OPCODE:
1193                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1194                 *ecode = RDMAP_INV_OPCODE;
1195                 break;
1196         case T4_ERR_DDP_QUEUE_NUM:
1197                 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1198                 *ecode = DDPU_INV_QN;
1199                 break;
1200         case T4_ERR_MSN:
1201         case T4_ERR_MSN_GAP:
1202         case T4_ERR_MSN_RANGE:
1203         case T4_ERR_IRD_OVERFLOW:
1204                 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1205                 *ecode = DDPU_INV_MSN_RANGE;
1206                 break;
1207         case T4_ERR_TBIT:
1208                 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
1209                 *ecode = 0;
1210                 break;
1211         case T4_ERR_MO:
1212                 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1213                 *ecode = DDPU_INV_MO;
1214                 break;
1215         default:
1216                 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1217                 *ecode = 0;
1218                 break;
1219         }
1220 }
1221
1222 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
1223                            gfp_t gfp)
1224 {
1225         struct fw_ri_wr *wqe;
1226         struct sk_buff *skb;
1227         struct terminate_message *term;
1228
1229         pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
1230                  qhp->ep->hwtid);
1231
1232         skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
1233         if (WARN_ON(!skb))
1234                 return;
1235
1236         set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1237
1238         wqe = __skb_put(skb, sizeof(*wqe));
1239         memset(wqe, 0, sizeof *wqe);
1240         wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
1241         wqe->flowid_len16 = cpu_to_be32(
1242                 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1243                 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1244
1245         wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
1246         wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
1247         term = (struct terminate_message *)wqe->u.terminate.termmsg;
1248         if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1249                 term->layer_etype = qhp->attr.layer_etype;
1250                 term->ecode = qhp->attr.ecode;
1251         } else
1252                 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
1253         c4iw_ofld_send(&qhp->rhp->rdev, skb);
1254 }
1255
1256 /*
1257  * Flush the SQ and RQ into the software CQs.  This takes the CQ locks and
 * then the qhp lock itself, so the caller must not already hold any of them.
1258  */
1259 static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
1260                        struct c4iw_cq *schp)
1261 {
1262         int count;
1263         int rq_flushed, sq_flushed;
1264         unsigned long flag;
1265
1266         pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
1267
1268         /* locking hierarchy: cqs lock first, then qp lock. */
1269         spin_lock_irqsave(&rchp->lock, flag);
1270         if (schp != rchp)
1271                 spin_lock(&schp->lock);
1272         spin_lock(&qhp->lock);
1273
1274         if (qhp->wq.flushed) {
1275                 spin_unlock(&qhp->lock);
1276                 if (schp != rchp)
1277                         spin_unlock(&schp->lock);
1278                 spin_unlock_irqrestore(&rchp->lock, flag);
1279                 return;
1280         }
1281         qhp->wq.flushed = 1;
1282         t4_set_wq_in_error(&qhp->wq);
1283
1284         c4iw_flush_hw_cq(rchp);
1285         c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
1286         rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
1287
1288         if (schp != rchp)
1289                 c4iw_flush_hw_cq(schp);
1290         sq_flushed = c4iw_flush_sq(qhp);
1291
1292         spin_unlock(&qhp->lock);
1293         if (schp != rchp)
1294                 spin_unlock(&schp->lock);
1295         spin_unlock_irqrestore(&rchp->lock, flag);
1296
1297         if (schp == rchp) {
1298                 if ((rq_flushed || sq_flushed) &&
1299                     t4_clear_cq_armed(&rchp->cq)) {
1300                         spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1301                         (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1302                                                    rchp->ibcq.cq_context);
1303                         spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1304                 }
1305         } else {
1306                 if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
1307                         spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1308                         (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1309                                                    rchp->ibcq.cq_context);
1310                         spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1311                 }
1312                 if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
1313                         spin_lock_irqsave(&schp->comp_handler_lock, flag);
1314                         (*schp->ibcq.comp_handler)(&schp->ibcq,
1315                                                    schp->ibcq.cq_context);
1316                         spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1317                 }
1318         }
1319 }
1320
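/*
 * For user QPs, mark the WQ and both CQs in error and invoke the CQ
 * completion handlers; kernel QPs get the full software flush via
 * __flush_qp().
 */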
1321 static void flush_qp(struct c4iw_qp *qhp)
1322 {
1323         struct c4iw_cq *rchp, *schp;
1324         unsigned long flag;
1325
1326         rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1327         schp = to_c4iw_cq(qhp->ibqp.send_cq);
1328
1329         if (qhp->ibqp.uobject) {
1330                 t4_set_wq_in_error(&qhp->wq);
1331                 t4_set_cq_in_error(&rchp->cq);
1332                 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1333                 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
1334                 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1335                 if (schp != rchp) {
1336                         t4_set_cq_in_error(&schp->cq);
1337                         spin_lock_irqsave(&schp->comp_handler_lock, flag);
1338                         (*schp->ibcq.comp_handler)(&schp->ibcq,
1339                                         schp->ibcq.cq_context);
1340                         spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1341                 }
1342                 return;
1343         }
1344         __flush_qp(qhp, rchp, schp);
1345 }
1346
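/*
 * Post a FW_RI_TYPE_FINI work request to take the QP out of RDMA mode and
 * wait for the firmware completion via the endpoint's wr_waitp.
 */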
1347 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1348                      struct c4iw_ep *ep)
1349 {
1350         struct fw_ri_wr *wqe;
1351         int ret;
1352         struct sk_buff *skb;
1353
1354         pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
1355
1356         skb = skb_dequeue(&ep->com.ep_skb_list);
1357         if (WARN_ON(!skb))
1358                 return -ENOMEM;
1359
1360         set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1361
1362         wqe = __skb_put(skb, sizeof(*wqe));
1363         memset(wqe, 0, sizeof *wqe);
1364         wqe->op_compl = cpu_to_be32(
1365                 FW_WR_OP_V(FW_RI_INIT_WR) |
1366                 FW_WR_COMPL_F);
1367         wqe->flowid_len16 = cpu_to_be32(
1368                 FW_WR_FLOWID_V(ep->hwtid) |
1369                 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1370         wqe->cookie = (uintptr_t)ep->com.wr_waitp;
1371
1372         wqe->u.fini.type = FW_RI_TYPE_FINI;
1373
1374         ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
1375                                  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1376
1377         pr_debug("ret %d\n", ret);
1378         return ret;
1379 }
1380
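     /*
      * Build the dummy RDMA WRITE or RDMA READ work request that serves
      * as the MPA peer-to-peer RTR message for the negotiated p2p type.
      */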
1381 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1382 {
1383         pr_debug("p2p_type = %d\n", p2p_type);
1384         memset(&init->u, 0, sizeof init->u);
1385         switch (p2p_type) {
1386         case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1387                 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1388                 init->u.write.stag_sink = cpu_to_be32(1);
1389                 init->u.write.to_sink = cpu_to_be64(1);
1390                 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1391                 init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1392                                                    sizeof(struct fw_ri_immd),
1393                                                    16);
1394                 break;
1395         case FW_RI_INIT_P2PTYPE_READ_REQ:
1396                 init->u.read.opcode = FW_RI_RDMA_READ_WR;
1397                 init->u.read.stag_src = cpu_to_be32(1);
1398                 init->u.read.to_src_lo = cpu_to_be32(1);
1399                 init->u.read.stag_sink = cpu_to_be32(1);
1400                 init->u.read.to_sink_lo = cpu_to_be32(1);
1401                 init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
1402                 break;
1403         }
1404 }
1405
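     /*
      * Post an FW_RI_INIT_WR of type INIT to move the QP into RDMA mode:
      * reserve IRD resources, program the MPA attributes, queue ids and
      * ORD/IRD limits, then wait for the firmware reply.  The IRD
      * reservation is released if the request fails.
      */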
1406 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1407 {
1408         struct fw_ri_wr *wqe;
1409         int ret;
1410         struct sk_buff *skb;
1411
1412         pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
1413                  qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
1414
1415         skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
1416         if (!skb) {
1417                 ret = -ENOMEM;
1418                 goto out;
1419         }
1420         ret = alloc_ird(rhp, qhp->attr.max_ird);
1421         if (ret) {
1422                 qhp->attr.max_ird = 0;
1423                 kfree_skb(skb);
1424                 goto out;
1425         }
1426         set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1427
1428         wqe = __skb_put(skb, sizeof(*wqe));
1429         memset(wqe, 0, sizeof *wqe);
1430         wqe->op_compl = cpu_to_be32(
1431                 FW_WR_OP_V(FW_RI_INIT_WR) |
1432                 FW_WR_COMPL_F);
1433         wqe->flowid_len16 = cpu_to_be32(
1434                 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1435                 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1436
1437         wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
1438
1439         wqe->u.init.type = FW_RI_TYPE_INIT;
1440         wqe->u.init.mpareqbit_p2ptype =
1441                 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
1442                 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
1443         wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1444         if (qhp->attr.mpa_attr.recv_marker_enabled)
1445                 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1446         if (qhp->attr.mpa_attr.xmit_marker_enabled)
1447                 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1448         if (qhp->attr.mpa_attr.crc_enabled)
1449                 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1450
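             /* Fast-register and STAG0 are only enabled for kernel QPs. */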
1451         wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1452                             FW_RI_QP_RDMA_WRITE_ENABLE |
1453                             FW_RI_QP_BIND_ENABLE;
1454         if (!qhp->ibqp.uobject)
1455                 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1456                                      FW_RI_QP_STAG0_ENABLE;
1457         wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1458         wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1459         wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1460         wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
1461         wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1462         wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1463         wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1464         wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1465         wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1466         wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1467         wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
1468         wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1469         wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1470                                          rhp->rdev.lldi.vr->rq.start);
1471         if (qhp->attr.mpa_attr.initiator)
1472                 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1473
1474         ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
1475                                  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1476         if (!ret)
1477                 goto out;
1478
1479         free_ird(rhp, qhp->attr.max_ird);
1480 out:
1481         pr_debug("ret %d\n", ret);
1482         return ret;
1483 }
1484
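     /*
      * Drive the QP state machine (IDLE/RTS/CLOSING/TERMINATE/ERROR).
      * Attribute changes are only permitted in IDLE.  Terminate,
      * disconnect and endpoint dereferences are deferred until
      * qhp->mutex has been dropped.
      */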
1485 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1486                    enum c4iw_qp_attr_mask mask,
1487                    struct c4iw_qp_attributes *attrs,
1488                    int internal)
1489 {
1490         int ret = 0;
1491         struct c4iw_qp_attributes newattr = qhp->attr;
1492         int disconnect = 0;
1493         int terminate = 0;
1494         int abort = 0;
1495         int free = 0;
1496         struct c4iw_ep *ep = NULL;
1497
1498         pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
1499                  qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1500                  (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
1501
1502         mutex_lock(&qhp->mutex);
1503
1504         /* Process attr changes if in IDLE */
1505         if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1506                 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1507                         ret = -EIO;
1508                         goto out;
1509                 }
1510                 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1511                         newattr.enable_rdma_read = attrs->enable_rdma_read;
1512                 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1513                         newattr.enable_rdma_write = attrs->enable_rdma_write;
1514                 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1515                         newattr.enable_bind = attrs->enable_bind;
1516                 if (mask & C4IW_QP_ATTR_MAX_ORD) {
1517                         if (attrs->max_ord > c4iw_max_read_depth) {
1518                                 ret = -EINVAL;
1519                                 goto out;
1520                         }
1521                         newattr.max_ord = attrs->max_ord;
1522                 }
1523                 if (mask & C4IW_QP_ATTR_MAX_IRD) {
1524                         if (attrs->max_ird > cur_max_read_depth(rhp)) {
1525                                 ret = -EINVAL;
1526                                 goto out;
1527                         }
1528                         newattr.max_ird = attrs->max_ird;
1529                 }
1530                 qhp->attr = newattr;
1531         }
1532
1533         if (mask & C4IW_QP_ATTR_SQ_DB) {
1534                 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
1535                 goto out;
1536         }
1537         if (mask & C4IW_QP_ATTR_RQ_DB) {
1538                 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
1539                 goto out;
1540         }
1541
1542         if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1543                 goto out;
1544         if (qhp->attr.state == attrs->next_state)
1545                 goto out;
1546
1547         switch (qhp->attr.state) {
1548         case C4IW_QP_STATE_IDLE:
1549                 switch (attrs->next_state) {
1550                 case C4IW_QP_STATE_RTS:
1551                         if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1552                                 ret = -EINVAL;
1553                                 goto out;
1554                         }
1555                         if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1556                                 ret = -EINVAL;
1557                                 goto out;
1558                         }
1559                         qhp->attr.mpa_attr = attrs->mpa_attr;
1560                         qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1561                         qhp->ep = qhp->attr.llp_stream_handle;
1562                         set_state(qhp, C4IW_QP_STATE_RTS);
1563
1564                         /*
1565                          * Ref the endpoint here and deref when we
1566                          * disassociate the endpoint from the QP.  This
1567                          * happens in CLOSING->IDLE transition or *->ERROR
1568                          * transition.
1569                          */
1570                         c4iw_get_ep(&qhp->ep->com);
1571                         ret = rdma_init(rhp, qhp);
1572                         if (ret)
1573                                 goto err;
1574                         break;
1575                 case C4IW_QP_STATE_ERROR:
1576                         set_state(qhp, C4IW_QP_STATE_ERROR);
1577                         flush_qp(qhp);
1578                         break;
1579                 default:
1580                         ret = -EINVAL;
1581                         goto out;
1582                 }
1583                 break;
1584         case C4IW_QP_STATE_RTS:
1585                 switch (attrs->next_state) {
1586                 case C4IW_QP_STATE_CLOSING:
1587                         t4_set_wq_in_error(&qhp->wq);
1588                         set_state(qhp, C4IW_QP_STATE_CLOSING);
1589                         ep = qhp->ep;
1590                         if (!internal) {
1591                                 abort = 0;
1592                                 disconnect = 1;
1593                                 c4iw_get_ep(&qhp->ep->com);
1594                         }
1595                         ret = rdma_fini(rhp, qhp, ep);
1596                         if (ret)
1597                                 goto err;
1598                         break;
1599                 case C4IW_QP_STATE_TERMINATE:
1600                         t4_set_wq_in_error(&qhp->wq);
1601                         set_state(qhp, C4IW_QP_STATE_TERMINATE);
1602                         qhp->attr.layer_etype = attrs->layer_etype;
1603                         qhp->attr.ecode = attrs->ecode;
1604                         ep = qhp->ep;
1605                         if (!internal) {
1606                                 c4iw_get_ep(&qhp->ep->com);
1607                                 terminate = 1;
1608                                 disconnect = 1;
1609                         } else {
1610                                 terminate = qhp->attr.send_term;
1611                                 ret = rdma_fini(rhp, qhp, ep);
1612                                 if (ret)
1613                                         goto err;
1614                         }
1615                         break;
1616                 case C4IW_QP_STATE_ERROR:
1617                         t4_set_wq_in_error(&qhp->wq);
1618                         set_state(qhp, C4IW_QP_STATE_ERROR);
1619                         if (!internal) {
1620                                 abort = 1;
1621                                 disconnect = 1;
1622                                 ep = qhp->ep;
1623                                 c4iw_get_ep(&qhp->ep->com);
1624                         }
1625                         goto err;
1626                         break;
1627                 default:
1628                         ret = -EINVAL;
1629                         goto out;
1630                 }
1631                 break;
1632         case C4IW_QP_STATE_CLOSING:
1633
1634                 /*
1635                  * Allow kernel users to move to ERROR for qp draining.
1636                  */
1637                 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
1638                                   C4IW_QP_STATE_ERROR)) {
1639                         ret = -EINVAL;
1640                         goto out;
1641                 }
1642                 switch (attrs->next_state) {
1643                 case C4IW_QP_STATE_IDLE:
1644                         flush_qp(qhp);
1645                         set_state(qhp, C4IW_QP_STATE_IDLE);
1646                         qhp->attr.llp_stream_handle = NULL;
1647                         c4iw_put_ep(&qhp->ep->com);
1648                         qhp->ep = NULL;
1649                         wake_up(&qhp->wait);
1650                         break;
1651                 case C4IW_QP_STATE_ERROR:
1652                         goto err;
1653                 default:
1654                         ret = -EINVAL;
1655                         goto err;
1656                 }
1657                 break;
1658         case C4IW_QP_STATE_ERROR:
1659                 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
1660                         ret = -EINVAL;
1661                         goto out;
1662                 }
1663                 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
1664                         ret = -EINVAL;
1665                         goto out;
1666                 }
1667                 set_state(qhp, C4IW_QP_STATE_IDLE);
1668                 break;
1669         case C4IW_QP_STATE_TERMINATE:
1670                 if (!internal) {
1671                         ret = -EINVAL;
1672                         goto out;
1673                 }
1674                 goto err;
1675                 break;
1676         default:
1677                 pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
1678                 ret = -EINVAL;
1679                 goto err;
1680                 break;
1681         }
1682         goto out;
1683 err:
1684         pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
1685                  qhp->wq.sq.qid);
1686
1687         /* disassociate the LLP connection */
1688         qhp->attr.llp_stream_handle = NULL;
1689         if (!ep)
1690                 ep = qhp->ep;
1691         qhp->ep = NULL;
1692         set_state(qhp, C4IW_QP_STATE_ERROR);
1693         free = 1;
1694         abort = 1;
1695         flush_qp(qhp);
1696         wake_up(&qhp->wait);
1697 out:
1698         mutex_unlock(&qhp->mutex);
1699
1700         if (terminate)
1701                 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
1702
1703         /*
1704          * If disconnect is 1, then we need to initiate a disconnect
1705          * on the EP.  This can be a normal close (RTS->CLOSING) or
1706          * an abnormal close (RTS/CLOSING->ERROR).
1707          */
1708         if (disconnect) {
1709                 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
1710                                                          GFP_KERNEL);
1711                 c4iw_put_ep(&ep->com);
1712         }
1713
1714         /*
1715          * If free is 1, then we've disassociated the EP from the QP
1716          * and we need to dereference the EP.
1717          */
1718         if (free)
1719                 c4iw_put_ep(&ep->com);
1720         pr_debug("exit state %d\n", qhp->attr.state);
1721         return ret;
1722 }
1723
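     /*
      * Move the QP to ERROR, wait for it to disassociate from its
      * endpoint, then release its resources.  The QP memory itself is
      * freed from free_qp_work() once the last reference is dropped.
      */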
1724 int c4iw_destroy_qp(struct ib_qp *ib_qp)
1725 {
1726         struct c4iw_dev *rhp;
1727         struct c4iw_qp *qhp;
1728         struct c4iw_qp_attributes attrs;
1729
1730         qhp = to_c4iw_qp(ib_qp);
1731         rhp = qhp->rhp;
1732
1733         attrs.next_state = C4IW_QP_STATE_ERROR;
1734         if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
1735                 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1736         else
1737                 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
1738         wait_event(qhp->wait, !qhp->ep);
1739
1740         remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1741
1742         spin_lock_irq(&rhp->lock);
1743         if (!list_empty(&qhp->db_fc_entry))
1744                 list_del_init(&qhp->db_fc_entry);
1745         spin_unlock_irq(&rhp->lock);
1746         free_ird(rhp, qhp->attr.max_ird);
1747
1748         c4iw_qp_rem_ref(ib_qp);
1749
1750         pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
1751         return 0;
1752 }
1753
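     /*
      * Allocate and initialize an RC QP: size the SQ and RQ, create the
      * hardware queues, and for user QPs export the queue memory and
      * doorbell pages to the caller via mmap keys in the uresp.
      */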
1754 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1755                              struct ib_udata *udata)
1756 {
1757         struct c4iw_dev *rhp;
1758         struct c4iw_qp *qhp;
1759         struct c4iw_pd *php;
1760         struct c4iw_cq *schp;
1761         struct c4iw_cq *rchp;
1762         struct c4iw_create_qp_resp uresp;
1763         unsigned int sqsize, rqsize;
1764         struct c4iw_ucontext *ucontext;
1765         int ret;
1766         struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
1767         struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
1768
1769         pr_debug("ib_pd %p\n", pd);
1770
1771         if (attrs->qp_type != IB_QPT_RC)
1772                 return ERR_PTR(-EINVAL);
1773
1774         php = to_c4iw_pd(pd);
1775         rhp = php->rhp;
1776         schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
1777         rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
1778         if (!schp || !rchp)
1779                 return ERR_PTR(-EINVAL);
1780
1781         if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
1782                 return ERR_PTR(-EINVAL);
1783
1784         if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
1785                 return ERR_PTR(-E2BIG);
1786         rqsize = attrs->cap.max_recv_wr + 1;
1787         if (rqsize < 8)
1788                 rqsize = 8;
1789
1790         if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
1791                 return ERR_PTR(-E2BIG);
1792         sqsize = attrs->cap.max_send_wr + 1;
1793         if (sqsize < 8)
1794                 sqsize = 8;
1795
1796         ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
1797
1798         qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
1799         if (!qhp)
1800                 return ERR_PTR(-ENOMEM);
1801
1802         qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
1803         if (!qhp->wr_waitp) {
1804                 ret = -ENOMEM;
1805                 goto err_free_qhp;
1806         }
1807
1808         qhp->wq.sq.size = sqsize;
1809         qhp->wq.sq.memsize =
1810                 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
1811                 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
1812         qhp->wq.sq.flush_cidx = -1;
1813         qhp->wq.rq.size = rqsize;
1814         qhp->wq.rq.memsize =
1815                 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
1816                 sizeof(*qhp->wq.rq.queue);
1817
1818         if (ucontext) {
1819                 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
1820                 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
1821         }
1822
1823         ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
1824                         ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
1825                         qhp->wr_waitp);
1826         if (ret)
1827                 goto err_free_wr_wait;
1828
1829         attrs->cap.max_recv_wr = rqsize - 1;
1830         attrs->cap.max_send_wr = sqsize - 1;
1831         attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
1832
1833         qhp->rhp = rhp;
1834         qhp->attr.pd = php->pdid;
1835         qhp->attr.scq = schp->cq.cqid;
1836         qhp->attr.rcq = rchp->cq.cqid;
1837         qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
1838         qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
1839         qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
1840         qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
1841         qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
1842         qhp->attr.state = C4IW_QP_STATE_IDLE;
1843         qhp->attr.next_state = C4IW_QP_STATE_IDLE;
1844         qhp->attr.enable_rdma_read = 1;
1845         qhp->attr.enable_rdma_write = 1;
1846         qhp->attr.enable_bind = 1;
1847         qhp->attr.max_ord = 0;
1848         qhp->attr.max_ird = 0;
1849         qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1850         spin_lock_init(&qhp->lock);
1851         mutex_init(&qhp->mutex);
1852         init_waitqueue_head(&qhp->wait);
1853         kref_init(&qhp->kref);
1854         INIT_WORK(&qhp->free_work, free_qp_work);
1855
1856         ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1857         if (ret)
1858                 goto err_destroy_qp;
1859
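             /*
              * For user QPs, hand the queue memory and the BAR2
              * doorbell/GTS pages (plus the MA sync page for on-chip
              * SQs) back to user space as mmap keys.
              */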
1860         if (udata && ucontext) {
1861                 sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
1862                 if (!sq_key_mm) {
1863                         ret = -ENOMEM;
1864                         goto err_remove_handle;
1865                 }
1866                 rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
1867                 if (!rq_key_mm) {
1868                         ret = -ENOMEM;
1869                         goto err_free_sq_key;
1870                 }
1871                 sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
1872                 if (!sq_db_key_mm) {
1873                         ret = -ENOMEM;
1874                         goto err_free_rq_key;
1875                 }
1876                 rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
1877                 if (!rq_db_key_mm) {
1878                         ret = -ENOMEM;
1879                         goto err_free_sq_db_key;
1880                 }
1881                 if (t4_sq_onchip(&qhp->wq.sq)) {
1882                         ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
1883                                                  GFP_KERNEL);
1884                         if (!ma_sync_key_mm) {
1885                                 ret = -ENOMEM;
1886                                 goto err_free_rq_db_key;
1887                         }
1888                         uresp.flags = C4IW_QPF_ONCHIP;
1889                 } else
1890                         uresp.flags = 0;
1891                 uresp.qid_mask = rhp->rdev.qpmask;
1892                 uresp.sqid = qhp->wq.sq.qid;
1893                 uresp.sq_size = qhp->wq.sq.size;
1894                 uresp.sq_memsize = qhp->wq.sq.memsize;
1895                 uresp.rqid = qhp->wq.rq.qid;
1896                 uresp.rq_size = qhp->wq.rq.size;
1897                 uresp.rq_memsize = qhp->wq.rq.memsize;
1898                 spin_lock(&ucontext->mmap_lock);
1899                 if (ma_sync_key_mm) {
1900                         uresp.ma_sync_key = ucontext->key;
1901                         ucontext->key += PAGE_SIZE;
1902                 } else {
1903                         uresp.ma_sync_key = 0;
1904                 }
1905                 uresp.sq_key = ucontext->key;
1906                 ucontext->key += PAGE_SIZE;
1907                 uresp.rq_key = ucontext->key;
1908                 ucontext->key += PAGE_SIZE;
1909                 uresp.sq_db_gts_key = ucontext->key;
1910                 ucontext->key += PAGE_SIZE;
1911                 uresp.rq_db_gts_key = ucontext->key;
1912                 ucontext->key += PAGE_SIZE;
1913                 spin_unlock(&ucontext->mmap_lock);
1914                 ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
1915                 if (ret)
1916                         goto err_free_ma_sync_key;
1917                 sq_key_mm->key = uresp.sq_key;
1918                 sq_key_mm->addr = qhp->wq.sq.phys_addr;
1919                 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
1920                 insert_mmap(ucontext, sq_key_mm);
1921                 rq_key_mm->key = uresp.rq_key;
1922                 rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
1923                 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1924                 insert_mmap(ucontext, rq_key_mm);
1925                 sq_db_key_mm->key = uresp.sq_db_gts_key;
1926                 sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
1927                 sq_db_key_mm->len = PAGE_SIZE;
1928                 insert_mmap(ucontext, sq_db_key_mm);
1929                 rq_db_key_mm->key = uresp.rq_db_gts_key;
1930                 rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa;
1931                 rq_db_key_mm->len = PAGE_SIZE;
1932                 insert_mmap(ucontext, rq_db_key_mm);
1933                 if (ma_sync_key_mm) {
1934                         ma_sync_key_mm->key = uresp.ma_sync_key;
1935                         ma_sync_key_mm->addr =
1936                                 (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
1937                                 PCIE_MA_SYNC_A) & PAGE_MASK;
1938                         ma_sync_key_mm->len = PAGE_SIZE;
1939                         insert_mmap(ucontext, ma_sync_key_mm);
1940                 }
1941
1942                 c4iw_get_ucontext(ucontext);
1943                 qhp->ucontext = ucontext;
1944         }
1945         qhp->ibqp.qp_num = qhp->wq.sq.qid;
1946         INIT_LIST_HEAD(&qhp->db_fc_entry);
1947         pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
1948                  qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
1949                  attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
1950                  qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
1951         return &qhp->ibqp;
1952 err_free_ma_sync_key:
1953         kfree(ma_sync_key_mm);
1954 err_free_rq_db_key:
1955         kfree(rq_db_key_mm);
1956 err_free_sq_db_key:
1957         kfree(sq_db_key_mm);
1958 err_free_rq_key:
1959         kfree(rq_key_mm);
1960 err_free_sq_key:
1961         kfree(sq_key_mm);
1962 err_remove_handle:
1963         remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
1964 err_destroy_qp:
1965         destroy_qp(&rhp->rdev, &qhp->wq,
1966                    ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1967 err_free_wr_wait:
1968         c4iw_put_wr_wait(qhp->wr_waitp);
1969 err_free_qhp:
1970         kfree(qhp);
1971         return ERR_PTR(ret);
1972 }
1973
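     /*
      * Translate the ib_qp_attr changes into c4iw attributes and apply
      * them via c4iw_modify_qp().  SQ_PSN and RQ_PSN are overloaded to
      * carry doorbell index increments on T4 devices.
      */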
1974 int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1975                       int attr_mask, struct ib_udata *udata)
1976 {
1977         struct c4iw_dev *rhp;
1978         struct c4iw_qp *qhp;
1979         enum c4iw_qp_attr_mask mask = 0;
1980         struct c4iw_qp_attributes attrs;
1981
1982         pr_debug("ib_qp %p\n", ibqp);
1983
1984         /* iwarp does not support the RTR state */
1985         if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
1986                 attr_mask &= ~IB_QP_STATE;
1987
1988         /* Make sure we still have something left to do */
1989         if (!attr_mask)
1990                 return 0;
1991
1992         memset(&attrs, 0, sizeof attrs);
1993         qhp = to_c4iw_qp(ibqp);
1994         rhp = qhp->rhp;
1995
1996         attrs.next_state = c4iw_convert_state(attr->qp_state);
1997         attrs.enable_rdma_read = (attr->qp_access_flags &
1998                                IB_ACCESS_REMOTE_READ) ?  1 : 0;
1999         attrs.enable_rdma_write = (attr->qp_access_flags &
2000                                 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2001         attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
2002
2004         mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
2005         mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
2006                         (C4IW_QP_ATTR_ENABLE_RDMA_READ |
2007                          C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
2008                          C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
2009
2010         /*
2011          * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
2012          * ringing the queue db when we're in DB_FULL mode.
2013          * Only allow this on T4 devices.
2014          */
2015         attrs.sq_db_inc = attr->sq_psn;
2016         attrs.rq_db_inc = attr->rq_psn;
2017         mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
2018         mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
2019         if (!is_t4(rhp->rdev.lldi.adapter_type) &&
2020             (mask & (C4IW_QP_ATTR_SQ_DB | C4IW_QP_ATTR_RQ_DB)))
2021                 return -EINVAL;
2022
2023         return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
2024 }
2025
2026 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
2027 {
2028         pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
2029         return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
2030 }
2031
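     /*
      * Report the current QP state and the capabilities established at
      * create time.
      */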
2032 int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2033                      int attr_mask, struct ib_qp_init_attr *init_attr)
2034 {
2035         struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
2036
2037         memset(attr, 0, sizeof *attr);
2038         memset(init_attr, 0, sizeof *init_attr);
2039         attr->qp_state = to_ib_qp_state(qhp->attr.state);
2040         init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
2041         init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
2042         init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
2043         init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
2044         init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
2045         init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
2046         return 0;
2047 }