drivers/infiniband/sw/rxe/rxe_resp.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
        RESPST_NONE,
        RESPST_GET_REQ,
        RESPST_CHK_PSN,
        RESPST_CHK_OP_SEQ,
        RESPST_CHK_OP_VALID,
        RESPST_CHK_RESOURCE,
        RESPST_CHK_LENGTH,
        RESPST_CHK_RKEY,
        RESPST_EXECUTE,
        RESPST_READ_REPLY,
        RESPST_COMPLETE,
        RESPST_ACKNOWLEDGE,
        RESPST_CLEANUP,
        RESPST_DUPLICATE_REQUEST,
        RESPST_ERR_MALFORMED_WQE,
        RESPST_ERR_UNSUPPORTED_OPCODE,
        RESPST_ERR_MISALIGNED_ATOMIC,
        RESPST_ERR_PSN_OUT_OF_SEQ,
        RESPST_ERR_MISSING_OPCODE_FIRST,
        RESPST_ERR_MISSING_OPCODE_LAST_C,
        RESPST_ERR_MISSING_OPCODE_LAST_D1E,
        RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
        RESPST_ERR_RNR,
        RESPST_ERR_RKEY_VIOLATION,
        RESPST_ERR_INVALIDATE_RKEY,
        RESPST_ERR_LENGTH,
        RESPST_ERR_CQ_OVERFLOW,
        RESPST_ERROR,
        RESPST_RESET,
        RESPST_DONE,
        RESPST_EXIT,
};

static char *resp_state_name[] = {
        [RESPST_NONE]                           = "NONE",
        [RESPST_GET_REQ]                        = "GET_REQ",
        [RESPST_CHK_PSN]                        = "CHK_PSN",
        [RESPST_CHK_OP_SEQ]                     = "CHK_OP_SEQ",
        [RESPST_CHK_OP_VALID]                   = "CHK_OP_VALID",
        [RESPST_CHK_RESOURCE]                   = "CHK_RESOURCE",
        [RESPST_CHK_LENGTH]                     = "CHK_LENGTH",
        [RESPST_CHK_RKEY]                       = "CHK_RKEY",
        [RESPST_EXECUTE]                        = "EXECUTE",
        [RESPST_READ_REPLY]                     = "READ_REPLY",
        [RESPST_COMPLETE]                       = "COMPLETE",
        [RESPST_ACKNOWLEDGE]                    = "ACKNOWLEDGE",
        [RESPST_CLEANUP]                        = "CLEANUP",
        [RESPST_DUPLICATE_REQUEST]              = "DUPLICATE_REQUEST",
        [RESPST_ERR_MALFORMED_WQE]              = "ERR_MALFORMED_WQE",
        [RESPST_ERR_UNSUPPORTED_OPCODE]         = "ERR_UNSUPPORTED_OPCODE",
        [RESPST_ERR_MISALIGNED_ATOMIC]          = "ERR_MISALIGNED_ATOMIC",
        [RESPST_ERR_PSN_OUT_OF_SEQ]             = "ERR_PSN_OUT_OF_SEQ",
        [RESPST_ERR_MISSING_OPCODE_FIRST]       = "ERR_MISSING_OPCODE_FIRST",
        [RESPST_ERR_MISSING_OPCODE_LAST_C]      = "ERR_MISSING_OPCODE_LAST_C",
        [RESPST_ERR_MISSING_OPCODE_LAST_D1E]    = "ERR_MISSING_OPCODE_LAST_D1E",
        [RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]      = "ERR_TOO_MANY_RDMA_ATM_REQ",
        [RESPST_ERR_RNR]                        = "ERR_RNR",
        [RESPST_ERR_RKEY_VIOLATION]             = "ERR_RKEY_VIOLATION",
        [RESPST_ERR_INVALIDATE_RKEY]            = "ERR_INVALIDATE_RKEY_VIOLATION",
        [RESPST_ERR_LENGTH]                     = "ERR_LENGTH",
        [RESPST_ERR_CQ_OVERFLOW]                = "ERR_CQ_OVERFLOW",
        [RESPST_ERROR]                          = "ERROR",
        [RESPST_RESET]                          = "RESET",
        [RESPST_DONE]                           = "DONE",
        [RESPST_EXIT]                           = "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
        int must_sched;
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

        skb_queue_tail(&qp->req_pkts, skb);

        must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
                        (skb_queue_len(&qp->req_pkts) > 1);

        rxe_run_task(&qp->resp.task, must_sched);
}

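/* Fetch the next request packet from the input queue, or drain the
 * queue first if the QP has moved to the error state.
 */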
static inline enum resp_states get_req(struct rxe_qp *qp,
                                       struct rxe_pkt_info **pkt_p)
{
        struct sk_buff *skb;

        if (qp->resp.state == QP_STATE_ERROR) {
                while ((skb = skb_dequeue(&qp->req_pkts))) {
                        rxe_put(qp);
                        kfree_skb(skb);
                        ib_device_put(qp->ibqp.device);
                }

                /* go drain recv wr queue */
                return RESPST_CHK_RESOURCE;
        }

        skb = skb_peek(&qp->req_pkts);
        if (!skb)
                return RESPST_EXIT;

        *pkt_p = SKB_TO_PKT(skb);

        return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

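/* Compare the packet PSN against the PSN the responder expects next;
 * RC QPs NAK or replay on a mismatch while UC QPs drop the message.
 */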
static enum resp_states check_psn(struct rxe_qp *qp,
                                  struct rxe_pkt_info *pkt)
{
        int diff = psn_compare(pkt->psn, qp->resp.psn);
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (diff > 0) {
                        if (qp->resp.sent_psn_nak)
                                return RESPST_CLEANUP;

                        qp->resp.sent_psn_nak = 1;
                        rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
                        return RESPST_ERR_PSN_OUT_OF_SEQ;

                } else if (diff < 0) {
                        rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
                        return RESPST_DUPLICATE_REQUEST;
                }

                if (qp->resp.sent_psn_nak)
                        qp->resp.sent_psn_nak = 0;

                break;

        case IB_QPT_UC:
                if (qp->resp.drop_msg || diff != 0) {
                        if (pkt->mask & RXE_START_MASK) {
                                qp->resp.drop_msg = 0;
                                return RESPST_CHK_OP_SEQ;
                        }

                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }
                break;
        default:
                break;
        }

        return RESPST_CHK_OP_SEQ;
}

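/* Check that the packet opcode is a legal successor of the previous
 * opcode received on this QP, e.g. that a MIDDLE or LAST follows a
 * matching FIRST or MIDDLE.
 */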
static enum resp_states check_op_seq(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_RC_SEND_FIRST:
                case IB_OPCODE_RC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                case IB_OPCODE_RC_RDMA_WRITE_FIRST:
                case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_C;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_RC_SEND_MIDDLE:
                        case IB_OPCODE_RC_SEND_LAST:
                        case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
                        case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST:
                        case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_ERR_MISSING_OPCODE_FIRST;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        case IB_QPT_UC:
                switch (qp->resp.opcode) {
                case IB_OPCODE_UC_SEND_FIRST:
                case IB_OPCODE_UC_SEND_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                case IB_OPCODE_UC_RDMA_WRITE_FIRST:
                case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                return RESPST_CHK_OP_VALID;
                        default:
                                return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
                        }

                default:
                        switch (pkt->opcode) {
                        case IB_OPCODE_UC_SEND_MIDDLE:
                        case IB_OPCODE_UC_SEND_LAST:
                        case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
                        case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST:
                        case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                                qp->resp.drop_msg = 1;
                                return RESPST_CLEANUP;
                        default:
                                return RESPST_CHK_OP_VALID;
                        }
                }
                break;

        default:
                return RESPST_CHK_OP_VALID;
        }
}

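/* Check that the QP's access flags permit the requested operation */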
static enum resp_states check_op_valid(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                if (((pkt->mask & RXE_READ_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
                    ((pkt->mask & RXE_WRITE_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
                    ((pkt->mask & RXE_ATOMIC_MASK) &&
                     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
                        return RESPST_ERR_UNSUPPORTED_OPCODE;
                }

                break;

        case IB_QPT_UC:
                if ((pkt->mask & RXE_WRITE_MASK) &&
                    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
                        qp->resp.drop_msg = 1;
                        return RESPST_CLEANUP;
                }

                break;

        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                break;

        default:
                WARN_ON_ONCE(1);
                break;
        }

        return RESPST_CHK_RESOURCE;
}

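/* Copy the next WQE off the shared receive queue and raise an SRQ
 * limit event if the number of posted WQEs drops below the limit.
 */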
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
        struct rxe_srq *srq = qp->srq;
        struct rxe_queue *q = srq->rq.queue;
        struct rxe_recv_wqe *wqe;
        struct ib_event ev;
        unsigned int count;
        size_t size;
        unsigned long flags;

        if (srq->error)
                return RESPST_ERR_RNR;

        spin_lock_irqsave(&srq->rq.consumer_lock, flags);

        wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
        if (!wqe) {
                spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
                return RESPST_ERR_RNR;
        }

        /* don't trust user space data */
        if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
                spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
                pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
                return RESPST_ERR_MALFORMED_WQE;
        }
        size = sizeof(*wqe) + wqe->dma.num_sge * sizeof(struct rxe_sge);
        memcpy(&qp->resp.srq_wqe, wqe, size);

        qp->resp.wqe = &qp->resp.srq_wqe.wqe;
        queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
        count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);

        if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
                srq->limit = 0;
                goto event;
        }

        spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
        return RESPST_CHK_LENGTH;

event:
        spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
        ev.device = qp->ibqp.device;
        ev.element.srq = qp->ibqp.srq;
        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
        srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
        return RESPST_CHK_LENGTH;
}

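/* Find a receive WQE or responder resource for the packet, or flush
 * any remaining recv WQEs if the QP has moved to the error state.
 */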
static enum resp_states check_resource(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        struct rxe_srq *srq = qp->srq;

        if (qp->resp.state == QP_STATE_ERROR) {
                if (qp->resp.wqe) {
                        qp->resp.status = IB_WC_WR_FLUSH_ERR;
                        return RESPST_COMPLETE;
                } else if (!srq) {
                        qp->resp.wqe = queue_head(qp->rq.queue,
                                        QUEUE_TYPE_FROM_CLIENT);
                        if (qp->resp.wqe) {
                                qp->resp.status = IB_WC_WR_FLUSH_ERR;
                                return RESPST_COMPLETE;
                        } else {
                                return RESPST_EXIT;
                        }
                } else {
                        return RESPST_EXIT;
                }
        }

        if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) {
                /* it is the requester's job not to send
                 * too many read/atomic ops; we just
                 * recycle the responder resource queue
                 */
                if (likely(qp->attr.max_dest_rd_atomic > 0))
                        return RESPST_CHK_LENGTH;
                else
                        return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
        }

        if (pkt->mask & RXE_RWR_MASK) {
                if (srq)
                        return get_srq_wqe(qp);

                qp->resp.wqe = queue_head(qp->rq.queue,
                                QUEUE_TYPE_FROM_CLIENT);
                return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
        }

        return RESPST_CHK_LENGTH;
}

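/* No length checks are implemented here; every QP type falls through
 * to the rkey check.
 */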
static enum resp_states check_length(struct rxe_qp *qp,
                                     struct rxe_pkt_info *pkt)
{
        switch (qp_type(qp)) {
        case IB_QPT_RC:
                return RESPST_CHK_RKEY;

        case IB_QPT_UC:
                return RESPST_CHK_RKEY;

        default:
                return RESPST_CHK_RKEY;
        }
}

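/* Validate the rkey, virtual address and length carried in the RETH
 * or ATMETH header against the MR or MW they reference and keep a
 * reference to the MR in qp->resp.mr for the rest of the operation.
 */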
static enum resp_states check_rkey(struct rxe_qp *qp,
                                   struct rxe_pkt_info *pkt)
{
        struct rxe_mr *mr = NULL;
        struct rxe_mw *mw = NULL;
        u64 va;
        u32 rkey;
        u32 resid;
        u32 pktlen;
        int mtu = qp->mtu;
        enum resp_states state;
        int access;

        if (pkt->mask & RXE_READ_OR_WRITE_MASK) {
                if (pkt->mask & RXE_RETH_MASK) {
                        qp->resp.va = reth_va(pkt);
                        qp->resp.offset = 0;
                        qp->resp.rkey = reth_rkey(pkt);
                        qp->resp.resid = reth_len(pkt);
                        qp->resp.length = reth_len(pkt);
                }
                access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
                                                     : IB_ACCESS_REMOTE_WRITE;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                qp->resp.va = atmeth_va(pkt);
                qp->resp.offset = 0;
                qp->resp.rkey = atmeth_rkey(pkt);
                qp->resp.resid = sizeof(u64);
                access = IB_ACCESS_REMOTE_ATOMIC;
        } else {
                return RESPST_EXECUTE;
        }

        /* A zero-byte op is not required to set an addr or rkey. */
        if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
            (pkt->mask & RXE_RETH_MASK) &&
            reth_len(pkt) == 0) {
                return RESPST_EXECUTE;
        }

        va      = qp->resp.va;
        rkey    = qp->resp.rkey;
        resid   = qp->resp.resid;
        pktlen  = payload_size(pkt);

        if (rkey_is_mw(rkey)) {
                mw = rxe_lookup_mw(qp, access, rkey);
                if (!mw) {
                        pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }

                mr = mw->mr;
                if (!mr) {
                        pr_err("%s: MW doesn't have an MR\n", __func__);
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }

                if (mw->access & IB_ZERO_BASED)
                        qp->resp.offset = mw->addr;

                rxe_put(mw);
                rxe_get(mr);
        } else {
                mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
                if (!mr) {
                        pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
                        state = RESPST_ERR_RKEY_VIOLATION;
                        goto err;
                }
        }

        if (mr_check_range(mr, va + qp->resp.offset, resid)) {
                state = RESPST_ERR_RKEY_VIOLATION;
                goto err;
        }

        if (pkt->mask & RXE_WRITE_MASK) {
                if (resid > mtu) {
                        if (pktlen != mtu || bth_pad(pkt)) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                } else {
                        if (pktlen != resid) {
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                        if (bth_pad(pkt) != (0x3 & (-resid))) {
                                /* This case may not be exactly the right
                                 * error, but nothing else fits.
                                 */
                                state = RESPST_ERR_LENGTH;
                                goto err;
                        }
                }
        }

        WARN_ON_ONCE(qp->resp.mr);

        qp->resp.mr = mr;
        return RESPST_EXECUTE;

err:
        if (mr)
                rxe_put(mr);
        if (mw)
                rxe_put(mw);

        return state;
}

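/* Copy inbound send payload into the current receive WQE's SG list */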
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
                                     int data_len)
{
        int err;

        err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
                        data_addr, data_len, RXE_TO_MR_OBJ);
        if (unlikely(err))
                return (err == -ENOSPC) ? RESPST_ERR_LENGTH
                                        : RESPST_ERR_MALFORMED_WQE;

        return RESPST_NONE;
}

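/* Copy inbound RDMA write payload into the target MR */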
static enum resp_states write_data_in(struct rxe_qp *qp,
                                      struct rxe_pkt_info *pkt)
{
        enum resp_states rc = RESPST_NONE;
        int err;
        int data_len = payload_size(pkt);

        err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
                          payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
        if (err) {
                rc = RESPST_ERR_RKEY_VIOLATION;
                goto out;
        }

        qp->resp.va += data_len;
        qp->resp.resid -= data_len;

out:
        return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

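/* Perform a compare & swap or fetch & add under atomic_ops_lock,
 * saving the original value so it can be returned in the atomic ack.
 */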
static enum resp_states process_atomic(struct rxe_qp *qp,
                                       struct rxe_pkt_info *pkt)
{
        u64 *vaddr;
        enum resp_states ret;
        struct rxe_mr *mr = qp->resp.mr;

        if (mr->state != RXE_MR_STATE_VALID) {
                ret = RESPST_ERR_RKEY_VIOLATION;
                goto out;
        }

        vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));

        /* check that vaddr is 8-byte aligned */
        if (!vaddr || (uintptr_t)vaddr & 7) {
                ret = RESPST_ERR_MISALIGNED_ATOMIC;
                goto out;
        }

        spin_lock_bh(&atomic_ops_lock);

        qp->resp.atomic_orig = *vaddr;

        if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
            pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
                if (*vaddr == atmeth_comp(pkt))
                        *vaddr = atmeth_swap_add(pkt);
        } else {
                *vaddr += atmeth_swap_add(pkt);
        }

        spin_unlock_bh(&atomic_ops_lock);

        ret = RESPST_NONE;
out:
        return ret;
}

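/* Allocate and initialize an ack, read response or atomic ack packet */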
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt,
                                          struct rxe_pkt_info *ack,
                                          int opcode,
                                          int payload,
                                          u32 psn,
                                          u8 syndrome)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct sk_buff *skb;
        int paylen;
        int pad;
        int err;

        /*
         * allocate packet
         */
        pad = (-payload) & 0x3;
        paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

        skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
        if (!skb)
                return NULL;

        ack->qp = qp;
        ack->opcode = opcode;
        ack->mask = rxe_opcode[opcode].mask;
        ack->paylen = paylen;
        ack->psn = psn;

        bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
                 qp->attr.dest_qp_num, 0, psn);

        if (ack->mask & RXE_AETH_MASK) {
                aeth_set_syn(ack, syndrome);
                aeth_set_msn(ack, qp->resp.msn);
        }

        if (ack->mask & RXE_ATMACK_MASK)
                atmack_set_orig(ack, qp->resp.atomic_orig);

        err = rxe_prepare(&qp->pri_av, ack, skb);
        if (err) {
                kfree_skb(skb);
                return NULL;
        }

        return skb;
}

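/* Set up a responder resource that tracks the progress of an RDMA
 * read reply across its response packets.
 */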
static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
                                        struct rxe_pkt_info *pkt)
{
        struct resp_res *res;
        u32 pkts;

        res = &qp->resp.resources[qp->resp.res_head];
        rxe_advance_resp_resource(qp);
        free_rd_atomic_resource(qp, res);

        res->type = RXE_READ_MASK;
        res->replay = 0;
        res->read.va = qp->resp.va + qp->resp.offset;
        res->read.va_org = qp->resp.va + qp->resp.offset;
        res->read.resid = qp->resp.resid;
        res->read.length = qp->resp.resid;
        res->read.rkey = qp->resp.rkey;

        pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1) / qp->mtu, 1);
        res->first_psn = pkt->psn;
        res->cur_psn = pkt->psn;
        res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;

        res->state = rdatm_res_state_new;

        return res;
}

/**
 * rxe_recheck_mr - revalidate MR from rkey and get a reference
 * @qp: the qp
 * @rkey: the rkey
 *
 * This code allows the MR to be invalidated or deregistered, or the MW,
 * if one was used, to be invalidated or deallocated. It is assumed that
 * the access permissions, if originally good, are still OK and that the
 * mappings are unchanged.
 *
 * TODO: If someone reregisters an MR to change its size or
 * access permissions during the processing of an RDMA read
 * we should kill the responder resource and complete the
 * operation with an error.
 *
 * Return: mr on success else NULL
 */
static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        struct rxe_mr *mr;
        struct rxe_mw *mw;

        if (rkey_is_mw(rkey)) {
                mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
                if (!mw)
                        return NULL;

                mr = mw->mr;
                if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
                    !mr || mr->state != RXE_MR_STATE_VALID) {
                        rxe_put(mw);
                        return NULL;
                }

                rxe_get(mr);
                rxe_put(mw);

                return mr;
        }

        mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
        if (!mr)
                return NULL;

        if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
                rxe_put(mr);
                return NULL;
        }

        return mr;
}

/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
                                   struct rxe_pkt_info *req_pkt)
{
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        int mtu = qp->mtu;
        enum resp_states state;
        int payload;
        int opcode;
        int err;
        struct resp_res *res = qp->resp.res;
        struct rxe_mr *mr;

        if (!res) {
                res = rxe_prepare_read_res(qp, req_pkt);
                qp->resp.res = res;
        }

        if (res->state == rdatm_res_state_new) {
                if (!res->replay) {
                        mr = qp->resp.mr;
                        qp->resp.mr = NULL;
                } else {
                        mr = rxe_recheck_mr(qp, res->read.rkey);
                        if (!mr)
                                return RESPST_ERR_RKEY_VIOLATION;
                }

                if (res->read.resid <= mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
        } else {
                mr = rxe_recheck_mr(qp, res->read.rkey);
                if (!mr)
                        return RESPST_ERR_RKEY_VIOLATION;

                if (res->read.resid > mtu)
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
                else
                        opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
        }

        res->state = rdatm_res_state_next;

        payload = min_t(int, res->read.resid, mtu);

        skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
                                 res->cur_psn, AETH_ACK_UNLIMITED);
        if (!skb)
                return RESPST_ERR_RNR;

        err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
                          payload, RXE_FROM_MR_OBJ);
        if (err)
                pr_err("Failed copying memory\n");
        if (mr)
                rxe_put(mr);

        if (bth_pad(&ack_pkt)) {
                u8 *pad = payload_addr(&ack_pkt) + payload;

                memset(pad, 0, bth_pad(&ack_pkt));
        }

        err = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (err) {
                pr_err("Failed sending RDMA reply.\n");
                return RESPST_ERR_RNR;
        }

        res->read.va += payload;
        res->read.resid -= payload;
        res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

        if (res->read.resid > 0) {
                state = RESPST_DONE;
        } else {
                qp->resp.res = NULL;
                if (!res->replay)
                        qp->resp.opcode = -1;
                if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
                        qp->resp.psn = res->cur_psn;
                state = RESPST_CLEANUP;
        }

        return state;
}

static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
{
        if (rkey_is_mw(rkey))
                return rxe_invalidate_mw(qp, rkey);
        else
                return rxe_invalidate_mr(qp, rkey);
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
        enum resp_states err;
        struct sk_buff *skb = PKT_TO_SKB(pkt);
        union rdma_network_hdr hdr;

        if (pkt->mask & RXE_SEND_MASK) {
                if (qp_type(qp) == IB_QPT_UD ||
                    qp_type(qp) == IB_QPT_SMI ||
                    qp_type(qp) == IB_QPT_GSI) {
                        if (skb->protocol == htons(ETH_P_IP)) {
                                memset(&hdr.reserved, 0,
                                                sizeof(hdr.reserved));
                                memcpy(&hdr.roce4grh, ip_hdr(skb),
                                                sizeof(hdr.roce4grh));
                                err = send_data_in(qp, &hdr, sizeof(hdr));
                        } else {
                                err = send_data_in(qp, ipv6_hdr(skb),
                                                sizeof(hdr));
                        }
                        if (err)
                                return err;
                }
                err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
                if (err)
                        return err;
        } else if (pkt->mask & RXE_WRITE_MASK) {
                err = write_data_in(qp, pkt);
                if (err)
                        return err;
        } else if (pkt->mask & RXE_READ_MASK) {
                /* For RDMA Read we can increment the msn now. See C9-148. */
                qp->resp.msn++;
                return RESPST_READ_REPLY;
        } else if (pkt->mask & RXE_ATOMIC_MASK) {
                err = process_atomic(qp, pkt);
                if (err)
                        return err;
        } else {
                /* Unreachable */
                WARN_ON_ONCE(1);
        }

        if (pkt->mask & RXE_IETH_MASK) {
                u32 rkey = ieth_rkey(pkt);

                err = invalidate_rkey(qp, rkey);
                if (err)
                        return RESPST_ERR_INVALIDATE_RKEY;
        }

        if (pkt->mask & RXE_END_MASK)
                /* We successfully processed this new request. */
                qp->resp.msn++;

        /* next expected psn, read handles this separately */
        qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
        qp->resp.ack_psn = qp->resp.psn;

        qp->resp.opcode = pkt->opcode;
        qp->resp.status = IB_WC_SUCCESS;

        if (pkt->mask & RXE_COMP_MASK)
                return RESPST_COMPLETE;
        else if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

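/* Retire the current receive WQE, if any, and post a completion that
 * is laid out for either a kernel or a user space CQ.
 */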
static enum resp_states do_complete(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        struct rxe_cqe cqe;
        struct ib_wc *wc = &cqe.ibwc;
        struct ib_uverbs_wc *uwc = &cqe.uibwc;
        struct rxe_recv_wqe *wqe = qp->resp.wqe;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

        if (!wqe)
                goto finish;

        memset(&cqe, 0, sizeof(cqe));

        if (qp->rcq->is_user) {
                uwc->status             = qp->resp.status;
                uwc->qp_num             = qp->ibqp.qp_num;
                uwc->wr_id              = wqe->wr_id;
        } else {
                wc->status              = qp->resp.status;
                wc->qp                  = &qp->ibqp;
                wc->wr_id               = wqe->wr_id;
        }

        if (wc->status == IB_WC_SUCCESS) {
                rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
                wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
                wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
                                pkt->mask & RXE_WRITE_MASK) ?
                                        qp->resp.length : wqe->dma.length - wqe->dma.resid;

                /* fields after byte_len are different between kernel and user
                 * space
                 */
                if (qp->rcq->is_user) {
                        uwc->wc_flags = IB_WC_GRH;

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_IMM;
                                uwc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                uwc->ex.invalidate_rkey = ieth_rkey(pkt);
                        }

                        if (pkt->mask & RXE_DETH_MASK)
                                uwc->src_qp = deth_sqp(pkt);

                        uwc->port_num           = qp->attr.port_num;
                } else {
                        struct sk_buff *skb = PKT_TO_SKB(pkt);

                        wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
                        if (skb->protocol == htons(ETH_P_IP))
                                wc->network_hdr_type = RDMA_NETWORK_IPV4;
                        else
                                wc->network_hdr_type = RDMA_NETWORK_IPV6;

                        if (is_vlan_dev(skb->dev)) {
                                wc->wc_flags |= IB_WC_WITH_VLAN;
                                wc->vlan_id = vlan_dev_vlan_id(skb->dev);
                        }

                        if (pkt->mask & RXE_IMMDT_MASK) {
                                wc->wc_flags |= IB_WC_WITH_IMM;
                                wc->ex.imm_data = immdt_imm(pkt);
                        }

                        if (pkt->mask & RXE_IETH_MASK) {
                                wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                                wc->ex.invalidate_rkey = ieth_rkey(pkt);
                        }

                        if (pkt->mask & RXE_DETH_MASK)
                                wc->src_qp = deth_sqp(pkt);

                        wc->port_num            = qp->attr.port_num;
                }
        }

        /* have copy for srq and reference for !srq */
        if (!qp->srq)
                queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);

        qp->resp.wqe = NULL;

        if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
                return RESPST_ERR_CQ_OVERFLOW;

finish:
        if (unlikely(qp->resp.state == QP_STATE_ERROR))
                return RESPST_CHK_RESOURCE;
        if (unlikely(!pkt))
                return RESPST_DONE;
        if (qp_type(qp) == IB_QPT_RC)
                return RESPST_ACKNOWLEDGE;
        else
                return RESPST_CLEANUP;
}

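/* Build and send an acknowledge packet carrying the given syndrome */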
static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
                    u8 syndrome, u32 psn)
{
        int err = 0;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;

        skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
                                 0, psn, syndrome);
        if (!skb) {
                err = -ENOMEM;
                goto err1;
        }

        err = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (err)
                pr_err_ratelimited("Failed sending ack\n");

err1:
        return err;
}

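/* Build and send an atomic ack and save it in a responder resource
 * so that a duplicate atomic request can be answered by resending it.
 */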
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
                           u8 syndrome)
{
        int rc = 0;
        struct rxe_pkt_info ack_pkt;
        struct sk_buff *skb;
        struct resp_res *res;

        skb = prepare_ack_packet(qp, pkt, &ack_pkt,
                                 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
                                 syndrome);
        if (!skb) {
                rc = -ENOMEM;
                goto out;
        }

        res = &qp->resp.resources[qp->resp.res_head];
        free_rd_atomic_resource(qp, res);
        rxe_advance_resp_resource(qp);

        skb_get(skb);
        res->type = RXE_ATOMIC_MASK;
        res->atomic.skb = skb;
        res->first_psn = ack_pkt.psn;
        res->last_psn  = ack_pkt.psn;
        res->cur_psn   = ack_pkt.psn;

        rc = rxe_xmit_packet(qp, &ack_pkt, skb);
        if (rc) {
                pr_err_ratelimited("Failed sending ack\n");
                rxe_put(qp);
        }
out:
        return rc;
}

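/* Send whatever ack the executed request calls for; only RC QPs
 * generate acks.
 */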
static enum resp_states acknowledge(struct rxe_qp *qp,
                                    struct rxe_pkt_info *pkt)
{
        if (qp_type(qp) != IB_QPT_RC)
                return RESPST_CLEANUP;

        if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
                send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
        else if (pkt->mask & RXE_ATOMIC_MASK)
                send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
        else if (bth_ack(pkt))
                send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

        return RESPST_CLEANUP;
}

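/* Drop the current request packet and release any MR reference that
 * is still held.
 */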
static enum resp_states cleanup(struct rxe_qp *qp,
                                struct rxe_pkt_info *pkt)
{
        struct sk_buff *skb;

        if (pkt) {
                skb = skb_dequeue(&qp->req_pkts);
                rxe_put(qp);
                kfree_skb(skb);
                ib_device_put(qp->ibqp.device);
        }

        if (qp->resp.mr) {
                rxe_put(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        return RESPST_DONE;
}

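/* Find the responder resource, if any, whose PSN range covers psn */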
static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
        int i;

        for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                struct resp_res *res = &qp->resp.resources[i];

                if (res->type == 0)
                        continue;

                if (psn_compare(psn, res->first_psn) >= 0 &&
                    psn_compare(psn, res->last_psn) <= 0) {
                        return res;
                }
        }

        return NULL;
}

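/* Handle a duplicate request: re-ack duplicate sends and writes,
 * replay RDMA read replies and resend saved atomic acks.
 */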
static enum resp_states duplicate_request(struct rxe_qp *qp,
                                          struct rxe_pkt_info *pkt)
{
        enum resp_states rc;
        u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

        if (pkt->mask & RXE_SEND_MASK ||
            pkt->mask & RXE_WRITE_MASK) {
                /* SEND or WRITE. Ack again and cleanup. C9-105. */
                send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
                return RESPST_CLEANUP;
        } else if (pkt->mask & RXE_READ_MASK) {
                struct resp_res *res;

                res = find_resource(qp, pkt->psn);
                if (!res) {
                        /* Resource not found. Class D error. Drop the
                         * request.
                         */
                        rc = RESPST_CLEANUP;
                        goto out;
                } else {
                        /* Ensure this new request is the same as the previous
                         * one or a subset of it.
                         */
                        u64 iova = reth_va(pkt);
                        u32 resid = reth_len(pkt);

                        if (iova < res->read.va_org ||
                            resid > res->read.length ||
                            (iova + resid) > (res->read.va_org +
                                              res->read.length)) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        if (reth_rkey(pkt) != res->read.rkey) {
                                rc = RESPST_CLEANUP;
                                goto out;
                        }

                        res->cur_psn = pkt->psn;
                        res->state = (pkt->psn == res->first_psn) ?
                                        rdatm_res_state_new :
                                        rdatm_res_state_replay;
                        res->replay = 1;

                        /* Reset the resource, except length. */
                        res->read.va_org = iova;
                        res->read.va = iova;
                        res->read.resid = resid;

                        /* Replay the RDMA read reply. */
                        qp->resp.res = res;
                        rc = RESPST_READ_REPLY;
                        goto out;
                }
        } else {
                struct resp_res *res;

                /* Find the operation in our list of responder resources. */
                res = find_resource(qp, pkt->psn);
                if (res) {
                        skb_get(res->atomic.skb);
                        /* Resend the result. */
                        rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
                        if (rc) {
                                pr_err("Failed resending result. This flow is not handled - skb ignored\n");
                                rc = RESPST_CLEANUP;
                                goto out;
                        }
                }

                /* Resource not found (Class D error) or result already
                 * resent. Drop the request.
                 */
                rc = RESPST_CLEANUP;
                goto out;
        }
out:
        return rc;
}

/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
                              enum ib_wc_status status)
{
        qp->resp.aeth_syndrome  = syndrome;
        qp->resp.status         = status;

        /* indicate that we should go through the ERROR state */
        qp->resp.goto_error     = 1;
}

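/* Process a class D1 or E error on a UC QP */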
static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
        /* UC */
        if (qp->srq) {
                /* Class E */
                qp->resp.drop_msg = 1;
                if (qp->resp.wqe) {
                        qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                        return RESPST_COMPLETE;
                } else {
                        return RESPST_CLEANUP;
                }
        } else {
                /* Class D1. This packet may be the start of a
                 * new message and could be valid. The previous
                 * message is invalid and ignored. Reset the
                 * recv wr to its original state.
                 */
                if (qp->resp.wqe) {
                        qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
                        qp->resp.wqe->dma.cur_sge = 0;
                        qp->resp.wqe->dma.sge_offset = 0;
                        qp->resp.opcode = -1;
                }

                if (qp->resp.mr) {
                        rxe_put(qp->resp.mr);
                        qp->resp.mr = NULL;
                }

                return RESPST_CLEANUP;
        }
}

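/* Free all queued request packets and, unless the caller asked for
 * notification, consume any recv WQEs left on a non-SRQ queue.
 */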
static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
        struct sk_buff *skb;
        struct rxe_queue *q = qp->rq.queue;

        while ((skb = skb_dequeue(&qp->req_pkts))) {
                rxe_put(qp);
                kfree_skb(skb);
                ib_device_put(qp->ibqp.device);
        }

        if (notify)
                return;

        while (!qp->srq && q && queue_head(q, q->type))
                queue_advance_consumer(q, q->type);
}

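/* The responder task: run the response state machine until the input
 * queue is empty or the current packet has been fully handled.
 */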
int rxe_responder(void *arg)
{
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        enum resp_states state;
        struct rxe_pkt_info *pkt = NULL;
        int ret = 0;

        rxe_get(qp);

        qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

        if (!qp->valid) {
                ret = -EINVAL;
                goto done;
        }

        switch (qp->resp.state) {
        case QP_STATE_RESET:
                state = RESPST_RESET;
                break;

        default:
                state = RESPST_GET_REQ;
                break;
        }

        while (1) {
                pr_debug("qp#%d state = %s\n", qp_num(qp),
                         resp_state_name[state]);
                switch (state) {
                case RESPST_GET_REQ:
                        state = get_req(qp, &pkt);
                        break;
                case RESPST_CHK_PSN:
                        state = check_psn(qp, pkt);
                        break;
                case RESPST_CHK_OP_SEQ:
                        state = check_op_seq(qp, pkt);
                        break;
                case RESPST_CHK_OP_VALID:
                        state = check_op_valid(qp, pkt);
                        break;
                case RESPST_CHK_RESOURCE:
                        state = check_resource(qp, pkt);
                        break;
                case RESPST_CHK_LENGTH:
                        state = check_length(qp, pkt);
                        break;
                case RESPST_CHK_RKEY:
                        state = check_rkey(qp, pkt);
                        break;
                case RESPST_EXECUTE:
                        state = execute(qp, pkt);
                        break;
                case RESPST_COMPLETE:
                        state = do_complete(qp, pkt);
                        break;
                case RESPST_READ_REPLY:
                        state = read_reply(qp, pkt);
                        break;
                case RESPST_ACKNOWLEDGE:
                        state = acknowledge(qp, pkt);
                        break;
                case RESPST_CLEANUP:
                        state = cleanup(qp, pkt);
                        break;
                case RESPST_DUPLICATE_REQUEST:
                        state = duplicate_request(qp, pkt);
                        break;
                case RESPST_ERR_PSN_OUT_OF_SEQ:
                        /* RC only - Class B. Drop packet. */
                        send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
                case RESPST_ERR_MISSING_OPCODE_FIRST:
                case RESPST_ERR_MISSING_OPCODE_LAST_C:
                case RESPST_ERR_UNSUPPORTED_OPCODE:
                case RESPST_ERR_MISALIGNED_ATOMIC:
                        /* RC Only - Class C. */
                        do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                          IB_WC_REM_INV_REQ_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
                        state = do_class_d1e_error(qp);
                        break;
                case RESPST_ERR_RNR:
                        if (qp_type(qp) == IB_QPT_RC) {
                                rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
                                /* RC - class B */
                                send_ack(qp, pkt, AETH_RNR_NAK |
                                         (~AETH_TYPE_MASK &
                                         qp->attr.min_rnr_timer),
                                         pkt->psn);
                        } else {
                                /* UD/UC - class D */
                                qp->resp.drop_msg = 1;
                        }
                        state = RESPST_CLEANUP;
                        break;

                case RESPST_ERR_RKEY_VIOLATION:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
                                                  IB_WC_REM_ACCESS_ERR);
                                state = RESPST_COMPLETE;
                        } else {
                                qp->resp.drop_msg = 1;
                                if (qp->srq) {
                                        /* UC/SRQ Class D */
                                        qp->resp.status = IB_WC_REM_ACCESS_ERR;
                                        state = RESPST_COMPLETE;
                                } else {
                                        /* UC/non-SRQ Class E. */
                                        state = RESPST_CLEANUP;
                                }
                        }
                        break;

                case RESPST_ERR_INVALIDATE_RKEY:
                        /* RC - Class J. */
                        qp->resp.goto_error = 1;
                        qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_LENGTH:
                        if (qp_type(qp) == IB_QPT_RC) {
                                /* Class C */
                                do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
                                                  IB_WC_REM_INV_REQ_ERR);
                                state = RESPST_COMPLETE;
                        } else if (qp->srq) {
                                /* UC/UD - class E */
                                qp->resp.status = IB_WC_REM_INV_REQ_ERR;
                                state = RESPST_COMPLETE;
                        } else {
                                /* UC/UD - class D */
                                qp->resp.drop_msg = 1;
                                state = RESPST_CLEANUP;
                        }
                        break;

                case RESPST_ERR_MALFORMED_WQE:
                        /* All, Class A. */
                        do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
                                          IB_WC_LOC_QP_OP_ERR);
                        state = RESPST_COMPLETE;
                        break;

                case RESPST_ERR_CQ_OVERFLOW:
                        /* All - Class G */
                        state = RESPST_ERROR;
                        break;

                case RESPST_DONE:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto done;

                case RESPST_EXIT:
                        if (qp->resp.goto_error) {
                                state = RESPST_ERROR;
                                break;
                        }

                        goto exit;

                case RESPST_RESET:
                        rxe_drain_req_pkts(qp, false);
                        qp->resp.wqe = NULL;
                        goto exit;

                case RESPST_ERROR:
                        qp->resp.goto_error = 0;
                        pr_warn("qp#%d moved to error state\n", qp_num(qp));
                        rxe_qp_error(qp);
                        goto exit;

                default:
                        WARN_ON_ONCE(1);
                }
        }

exit:
        ret = -EAGAIN;
done:
        rxe_put(qp);
        return ret;
}