drivers/infiniband/hw/hfi1/tid_rdma.c
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3  * Copyright(c) 2018 Intel Corporation.
4  *
5  */
6
7 #include "hfi.h"
8 #include "qp.h"
9 #include "rc.h"
10 #include "verbs.h"
11 #include "tid_rdma.h"
12 #include "exp_rcv.h"
13 #include "trace.h"
14
15 /**
16  * DOC: TID RDMA READ protocol
17  *
18  * This is an end-to-end protocol at the hfi1 level between two nodes that
19  * improves performance by avoiding data copy on the requester side. It
20  * converts a qualified RDMA READ request into a TID RDMA READ request on
21  * the requester side and thereafter handles the request and response
22  * differently. To be qualified, the RDMA READ request should meet the
23  * following:
24  * -- The total data length should be greater than 256K;
25  * -- The total data length should be a multiple of 4K page size;
26  * -- Each local scatter-gather entry should be 4K page aligned;
27  * -- Each local scatter-gather entry should be a multiple of 4K page size;
28  */
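/*
 * Illustrative only (not driver code): a minimal standalone sketch of
 * the qualification test described above. The struct and names below
 * are hypothetical; in the driver the alignment checks are done by
 * hfi1_check_sge_align() on the real sge list.
 */
struct example_sge {
	u64 vaddr;
	u32 length;
};

static __maybe_unused bool example_read_qualifies(const struct example_sge *sgl,
						  int num_sge, u64 total_len)
{
	int i;

	/* total length strictly greater than 256K and a multiple of 4K */
	if (total_len <= 256 * 1024 || total_len & (4096 - 1))
		return false;
	/* each local entry must be 4K aligned and a multiple of 4K */
	for (i = 0; i < num_sge; i++)
		if ((sgl[i].vaddr | (u64)sgl[i].length) & (4096 - 1))
			return false;
	return true;
}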
29
30 #define RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK BIT_ULL(32)
31 #define RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK BIT_ULL(33)
32 #define RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK BIT_ULL(34)
33 #define RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK BIT_ULL(35)
34 #define RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK BIT_ULL(37)
35 #define RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK BIT_ULL(38)
36
37 /* Maximum number of packets within a flow generation. */
38 #define MAX_TID_FLOW_PSN BIT(HFI1_KDETH_BTH_SEQ_SHIFT)
39
40 #define GENERATION_MASK 0xFFFFF
41
42 static u32 mask_generation(u32 a)
43 {
44         return a & GENERATION_MASK;
45 }
46
47 /* Reserved generation value to set to unused flows for kernel contexts */
48 #define KERN_GENERATION_RESERVED mask_generation(U32_MAX)
49
50 /*
51  * J_KEY for kernel contexts when TID RDMA is used.
52  * See generate_jkey() in hfi.h for more information.
53  */
54 #define TID_RDMA_JKEY                   32
55 #define HFI1_KERNEL_MIN_JKEY HFI1_ADMIN_JKEY_RANGE
56 #define HFI1_KERNEL_MAX_JKEY (2 * HFI1_ADMIN_JKEY_RANGE - 1)
57
58 /* Maximum number of segments in flight per QP request. */
59 #define TID_RDMA_MAX_READ_SEGS_PER_REQ  6
60 #define TID_RDMA_MAX_WRITE_SEGS_PER_REQ 4
61 #define MAX_REQ max_t(u16, TID_RDMA_MAX_READ_SEGS_PER_REQ, \
62                         TID_RDMA_MAX_WRITE_SEGS_PER_REQ)
63 #define MAX_FLOWS roundup_pow_of_two(MAX_REQ + 1)
64
65 #define MAX_EXPECTED_PAGES     (MAX_EXPECTED_BUFFER / PAGE_SIZE)
66
67 #define TID_RDMA_DESTQP_FLOW_SHIFT      11
68 #define TID_RDMA_DESTQP_FLOW_MASK       0x1f
69
70 #define TID_OPFN_QP_CTXT_MASK 0xff
71 #define TID_OPFN_QP_CTXT_SHIFT 56
72 #define TID_OPFN_QP_KDETH_MASK 0xff
73 #define TID_OPFN_QP_KDETH_SHIFT 48
74 #define TID_OPFN_MAX_LEN_MASK 0x7ff
75 #define TID_OPFN_MAX_LEN_SHIFT 37
76 #define TID_OPFN_TIMEOUT_MASK 0x1f
77 #define TID_OPFN_TIMEOUT_SHIFT 32
78 #define TID_OPFN_RESERVED_MASK 0x3f
79 #define TID_OPFN_RESERVED_SHIFT 26
80 #define TID_OPFN_URG_MASK 0x1
81 #define TID_OPFN_URG_SHIFT 25
82 #define TID_OPFN_VER_MASK 0x7
83 #define TID_OPFN_VER_SHIFT 22
84 #define TID_OPFN_JKEY_MASK 0x3f
85 #define TID_OPFN_JKEY_SHIFT 16
86 #define TID_OPFN_MAX_READ_MASK 0x3f
87 #define TID_OPFN_MAX_READ_SHIFT 10
88 #define TID_OPFN_MAX_WRITE_MASK 0x3f
89 #define TID_OPFN_MAX_WRITE_SHIFT 4
90
91 /*
92  * OPFN TID layout
93  *
94  * 63               47               31               15
95  * NNNNNNNNKKKKKKKK MMMMMMMMMMMTTTTT DDDDDDUVVVJJJJJJ RRRRRRWWWWWWCCCC
96  * 3210987654321098 7654321098765432 1098765432109876 5432109876543210
97  * N - the context Number
98  * K - the Kdeth_qp
99  * M - Max_len
100  * T - Timeout
101  * D - reserveD
102  * V - version
103  * U - Urg capable
104  * J - Jkey
105  * R - max_Read
106  * W - max_Write
107  * C - Capcode
108  */
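/*
 * Worked example of the layout above (illustrative, assuming 4K pages,
 * i.e. PAGE_SHIFT == 12): a max_len of 256 KiB is 64 pages and is
 * stored by tid_rdma_opfn_encode() as (64 - 1) = 0x03f in bits 47:37;
 * the decode side recovers it as (0x03f + 1) << 12 = 256 KiB. A jkey
 * of 32 fits in the 6-bit J field and lands unmodified in bits 21:16.
 */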
109
110 static u32 tid_rdma_flow_wt;
111
112 static void tid_rdma_trigger_resume(struct work_struct *work);
113 static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
114 static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
115                                          gfp_t gfp);
116 static void hfi1_init_trdma_req(struct rvt_qp *qp,
117                                 struct tid_rdma_request *req);
118 static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
119 static void hfi1_tid_timeout(struct timer_list *t);
120 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
121 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
122 static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
123 static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
124 static void hfi1_tid_retry_timeout(struct timer_list *t);
125 static int make_tid_rdma_ack(struct rvt_qp *qp,
126                              struct ib_other_headers *ohdr,
127                              struct hfi1_pkt_state *ps);
128 static void hfi1_do_tid_send(struct rvt_qp *qp);
129 static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx);
130 static void tid_rdma_rcv_err(struct hfi1_packet *packet,
131                              struct ib_other_headers *ohdr,
132                              struct rvt_qp *qp, u32 psn, int diff, bool fecn);
133 static void update_r_next_psn_fecn(struct hfi1_packet *packet,
134                                    struct hfi1_qp_priv *priv,
135                                    struct hfi1_ctxtdata *rcd,
136                                    struct tid_rdma_flow *flow,
137                                    bool fecn);
138
139 static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
140 {
141         return
142                 (((u64)p->qp & TID_OPFN_QP_CTXT_MASK) <<
143                         TID_OPFN_QP_CTXT_SHIFT) |
144                 ((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) <<
145                         TID_OPFN_QP_KDETH_SHIFT) |
146                 (((u64)((p->max_len >> PAGE_SHIFT) - 1) &
147                         TID_OPFN_MAX_LEN_MASK) << TID_OPFN_MAX_LEN_SHIFT) |
148                 (((u64)p->timeout & TID_OPFN_TIMEOUT_MASK) <<
149                         TID_OPFN_TIMEOUT_SHIFT) |
150                 (((u64)p->urg & TID_OPFN_URG_MASK) << TID_OPFN_URG_SHIFT) |
151                 (((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT) |
152                 (((u64)p->max_read & TID_OPFN_MAX_READ_MASK) <<
153                         TID_OPFN_MAX_READ_SHIFT) |
154                 (((u64)p->max_write & TID_OPFN_MAX_WRITE_MASK) <<
155                         TID_OPFN_MAX_WRITE_SHIFT);
156 }
157
158 static void tid_rdma_opfn_decode(struct tid_rdma_params *p, u64 data)
159 {
160         p->max_len = (((data >> TID_OPFN_MAX_LEN_SHIFT) &
161                 TID_OPFN_MAX_LEN_MASK) + 1) << PAGE_SHIFT;
162         p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK;
163         p->max_write = (data >> TID_OPFN_MAX_WRITE_SHIFT) &
164                 TID_OPFN_MAX_WRITE_MASK;
165         p->max_read = (data >> TID_OPFN_MAX_READ_SHIFT) &
166                 TID_OPFN_MAX_READ_MASK;
167         p->qp =
168                 ((((data >> TID_OPFN_QP_KDETH_SHIFT) & TID_OPFN_QP_KDETH_MASK)
169                         << 16) |
170                 ((data >> TID_OPFN_QP_CTXT_SHIFT) & TID_OPFN_QP_CTXT_MASK));
171         p->urg = (data >> TID_OPFN_URG_SHIFT) & TID_OPFN_URG_MASK;
172         p->timeout = (data >> TID_OPFN_TIMEOUT_SHIFT) & TID_OPFN_TIMEOUT_MASK;
173 }
174
175 void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
176 {
177         struct hfi1_qp_priv *priv = qp->priv;
178
179         p->qp = (kdeth_qp << 16) | priv->rcd->ctxt;
180         p->max_len = TID_RDMA_MAX_SEGMENT_SIZE;
181         p->jkey = priv->rcd->jkey;
182         p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ;
183         p->max_write = TID_RDMA_MAX_WRITE_SEGS_PER_REQ;
184         p->timeout = qp->timeout;
185         p->urg = is_urg_masked(priv->rcd);
186 }
187
188 bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data)
189 {
190         struct hfi1_qp_priv *priv = qp->priv;
191
192         *data = tid_rdma_opfn_encode(&priv->tid_rdma.local);
193         return true;
194 }
195
196 bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data)
197 {
198         struct hfi1_qp_priv *priv = qp->priv;
199         struct tid_rdma_params *remote, *old;
200         bool ret = true;
201
202         old = rcu_dereference_protected(priv->tid_rdma.remote,
203                                         lockdep_is_held(&priv->opfn.lock));
204         data &= ~0xfULL;
205         /*
206          * If data passed in is zero, return true so as not to continue the
207          * negotiation process
208          */
209         if (!data || !HFI1_CAP_IS_KSET(TID_RDMA))
210                 goto null;
211         /*
212          * If kzalloc fails, return false. This will result in:
213          * * at the requester a new OPFN request being generated to retry
214          *   the negotiation
215          * * at the responder, 0 being returned to the requester so as to
216          *   disable TID RDMA at both the requester and the responder
217          */
218         remote = kzalloc(sizeof(*remote), GFP_ATOMIC);
219         if (!remote) {
220                 ret = false;
221                 goto null;
222         }
223
224         tid_rdma_opfn_decode(remote, data);
225         priv->tid_timer_timeout_jiffies =
226                 usecs_to_jiffies((((4096UL * (1UL << remote->timeout)) /
227                                    1000UL) << 3) * 7);
228         trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local);
229         trace_hfi1_opfn_param(qp, 1, remote);
230         rcu_assign_pointer(priv->tid_rdma.remote, remote);
231         /*
232          * A TID RDMA READ request's segment size is not equal to
233          * remote->max_len only when the request's data length is smaller
234          * than remote->max_len. In that case, there will be only one segment.
235          * Therefore, when priv->pkts_ps is used to calculate req->cur_seg
236          * during retry, it will lead to req->cur_seg = 0, which is exactly
237          * what is expected.
238          */
239         priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len);
240         priv->timeout_shift = ilog2(priv->pkts_ps - 1) + 1;
241         goto free;
242 null:
243         RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
244         priv->timeout_shift = 0;
245 free:
246         if (old)
247                 kfree_rcu(old, rcu_head);
248         return ret;
249 }
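/*
 * Worked example of the timer math above (illustrative): the IB
 * timeout field encodes a period of 4.096 usec * 2^timeout, which
 * (4096 << timeout) / 1000 approximates in microseconds. The << 3 and
 * * 7 then scale that period by 56. For remote->timeout == 14 this is
 * roughly 67 msec * 56, i.e. a reap timeout of about 3.8 seconds.
 */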
250
251 bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data)
252 {
253         bool ret;
254
255         ret = tid_rdma_conn_reply(qp, *data);
256         *data = 0;
257         /*
258          * If tid_rdma_conn_reply() returns error, set *data as 0 to indicate
259          * TID RDMA could not be enabled. This will result in TID RDMA being
260          * disabled at the requester too.
261          */
262         if (ret)
263                 (void)tid_rdma_conn_req(qp, data);
264         return ret;
265 }
266
267 void tid_rdma_conn_error(struct rvt_qp *qp)
268 {
269         struct hfi1_qp_priv *priv = qp->priv;
270         struct tid_rdma_params *old;
271
272         old = rcu_dereference_protected(priv->tid_rdma.remote,
273                                         lockdep_is_held(&priv->opfn.lock));
274         RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
275         if (old)
276                 kfree_rcu(old, rcu_head);
277 }
278
279 /* This is called at context initialization time */
280 int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
281 {
282         if (reinit)
283                 return 0;
284
285         BUILD_BUG_ON(TID_RDMA_JKEY < HFI1_KERNEL_MIN_JKEY);
286         BUILD_BUG_ON(TID_RDMA_JKEY > HFI1_KERNEL_MAX_JKEY);
287         rcd->jkey = TID_RDMA_JKEY;
288         hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
289         return hfi1_alloc_ctxt_rcv_groups(rcd);
290 }
291
292 /**
293  * qp_to_rcd - determine the receive context used by a qp
294  * @qp - the qp
295  *
296  * This routine returns the receive context associated
297  * with a qp's qpn.
298  *
299  * Returns the context.
300  */
301 static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
302                                        struct rvt_qp *qp)
303 {
304         struct hfi1_ibdev *verbs_dev = container_of(rdi,
305                                                     struct hfi1_ibdev,
306                                                     rdi);
307         struct hfi1_devdata *dd = container_of(verbs_dev,
308                                                struct hfi1_devdata,
309                                                verbs_dev);
310         unsigned int ctxt;
311
312         if (qp->ibqp.qp_num == 0)
313                 ctxt = 0;
314         else
315                 ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
316         return dd->rcd[ctxt];
317 }
318
319 int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
320                       struct ib_qp_init_attr *init_attr)
321 {
322         struct hfi1_qp_priv *qpriv = qp->priv;
323         int i, ret;
324
325         qpriv->rcd = qp_to_rcd(rdi, qp);
326
327         spin_lock_init(&qpriv->opfn.lock);
328         INIT_WORK(&qpriv->opfn.opfn_work, opfn_send_conn_request);
329         INIT_WORK(&qpriv->tid_rdma.trigger_work, tid_rdma_trigger_resume);
330         qpriv->flow_state.psn = 0;
331         qpriv->flow_state.index = RXE_NUM_TID_FLOWS;
332         qpriv->flow_state.last_index = RXE_NUM_TID_FLOWS;
333         qpriv->flow_state.generation = KERN_GENERATION_RESERVED;
334         qpriv->s_state = TID_OP(WRITE_RESP);
335         qpriv->s_tid_cur = HFI1_QP_WQE_INVALID;
336         qpriv->s_tid_head = HFI1_QP_WQE_INVALID;
337         qpriv->s_tid_tail = HFI1_QP_WQE_INVALID;
338         qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
339         qpriv->r_tid_head = HFI1_QP_WQE_INVALID;
340         qpriv->r_tid_tail = HFI1_QP_WQE_INVALID;
341         qpriv->r_tid_ack = HFI1_QP_WQE_INVALID;
342         qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID;
343         atomic_set(&qpriv->n_requests, 0);
344         atomic_set(&qpriv->n_tid_requests, 0);
345         timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0);
346         timer_setup(&qpriv->s_tid_retry_timer, hfi1_tid_retry_timeout, 0);
347         INIT_LIST_HEAD(&qpriv->tid_wait);
348
349         if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
350                 struct hfi1_devdata *dd = qpriv->rcd->dd;
351
352                 qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES *
353                                                 sizeof(*qpriv->pages),
354                                             GFP_KERNEL, dd->node);
355                 if (!qpriv->pages)
356                         return -ENOMEM;
357                 for (i = 0; i < qp->s_size; i++) {
358                         struct hfi1_swqe_priv *priv;
359                         struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
360
361                         priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
362                                             dd->node);
363                         if (!priv)
364                                 return -ENOMEM;
365
366                         hfi1_init_trdma_req(qp, &priv->tid_req);
367                         priv->tid_req.e.swqe = wqe;
368                         wqe->priv = priv;
369                 }
370                 for (i = 0; i < rvt_max_atomic(rdi); i++) {
371                         struct hfi1_ack_priv *priv;
372
373                         priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
374                                             dd->node);
375                         if (!priv)
376                                 return -ENOMEM;
377
378                         hfi1_init_trdma_req(qp, &priv->tid_req);
379                         priv->tid_req.e.ack = &qp->s_ack_queue[i];
380
381                         ret = hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req,
382                                                             GFP_KERNEL);
383                         if (ret) {
384                                 kfree(priv);
385                                 return ret;
386                         }
387                         qp->s_ack_queue[i].priv = priv;
388                 }
389         }
390
391         return 0;
392 }
393
394 void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
395 {
396         struct hfi1_qp_priv *qpriv = qp->priv;
397         struct rvt_swqe *wqe;
398         u32 i;
399
400         if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
401                 for (i = 0; i < qp->s_size; i++) {
402                         wqe = rvt_get_swqe_ptr(qp, i);
403                         kfree(wqe->priv);
404                         wqe->priv = NULL;
405                 }
406                 for (i = 0; i < rvt_max_atomic(rdi); i++) {
407                         struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv;
408
409                         if (priv)
410                                 hfi1_kern_exp_rcv_free_flows(&priv->tid_req);
411                         kfree(priv);
412                         qp->s_ack_queue[i].priv = NULL;
413                 }
414                 cancel_work_sync(&qpriv->opfn.opfn_work);
415                 kfree(qpriv->pages);
416                 qpriv->pages = NULL;
417         }
418 }
419
420 /* Flow and tid waiter functions */
421 /**
422  * DOC: lock ordering
423  *
424  * There are two locks involved with the queuing
425  * routines: the qp s_lock and the exp_lock.
426  *
427  * Since the tid space allocation is called from
428  * the send engine, the qp s_lock is already held.
429  *
430  * The allocation routines will get the exp_lock.
431  *
432  * The first_qp() call is provided to allow the head of
433  * the rcd wait queue to be fetched under the exp_lock and
434  * followed by a drop of the exp_lock.
435  *
436  * Any qp in the wait list will have the qp reference count held
437  * to hold the qp in memory.
438  */
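/*
 * Concretely (see hfi1_kern_setup_hw_flow() below for a real
 * instance): with qp->s_lock already held by the send engine, take
 * rcd->exp_lock, attempt the allocation, fetch the next waiter with
 * first_qp() (which takes a qp reference), drop exp_lock, and finally
 * hand the reference off via tid_rdma_schedule_tid_wakeup().
 */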
439
440 /*
441  * return head of rcd wait list
442  *
443  * Must hold the exp_lock.
444  *
445  * Get a reference to the QP to hold the QP in memory.
446  *
447  * The caller must release the reference when the returned
448  * qp is no longer being used.
449  */
450 static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
451                                struct tid_queue *queue)
452         __must_hold(&rcd->exp_lock)
453 {
454         struct hfi1_qp_priv *priv;
455
456         lockdep_assert_held(&rcd->exp_lock);
457         priv = list_first_entry_or_null(&queue->queue_head,
458                                         struct hfi1_qp_priv,
459                                         tid_wait);
460         if (!priv)
461                 return NULL;
462         rvt_get_qp(priv->owner);
463         return priv->owner;
464 }
465
466 /**
467  * kernel_tid_waiters - determine rcd wait
468  * @rcd: the receive context
469  * @qp: the head of the qp being processed
470  *
471  * This routine will return false IFF
472  * the list is empty or the head of the
473  * list is the indicated qp.
474  *
475  * Must hold the qp s_lock and the exp_lock.
476  *
477  * Return:
478  * false if either of the conditions below are satisfied:
479  * 1. The list is empty or
480  * 2. The indicated qp is at the head of the list and the
481  *    HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags.
482  * true is returned otherwise.
483  */
484 static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
485                                struct tid_queue *queue, struct rvt_qp *qp)
486         __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
487 {
488         struct rvt_qp *fqp;
489         bool ret = true;
490
491         lockdep_assert_held(&qp->s_lock);
492         lockdep_assert_held(&rcd->exp_lock);
493         fqp = first_qp(rcd, queue);
494         if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE)))
495                 ret = false;
496         rvt_put_qp(fqp);
497         return ret;
498 }
499
500 /**
501  * dequeue_tid_waiter - dequeue the qp from the list
502  * @qp - the qp to remove from the wait list
503  *
504  * This routine removes the indicated qp from the
505  * wait list if it is there.
506  *
507  * This should be done after the hardware flow and
508  * tid array resources have been allocated.
509  *
510  * Must hold the qp s_lock and the rcd exp_lock.
511  *
512  * It assumes the s_lock to protect the s_flags
513  * field and to reliably test the HFI1_S_WAIT_TID_SPACE flag.
514  */
515 static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
516                                struct tid_queue *queue, struct rvt_qp *qp)
517         __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
518 {
519         struct hfi1_qp_priv *priv = qp->priv;
520
521         lockdep_assert_held(&qp->s_lock);
522         lockdep_assert_held(&rcd->exp_lock);
523         if (list_empty(&priv->tid_wait))
524                 return;
525         list_del_init(&priv->tid_wait);
526         qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
527         queue->dequeue++;
528         rvt_put_qp(qp);
529 }
530
531 /**
532  * queue_qp_for_tid_wait - suspend QP on tid space
533  * @rcd: the receive context
534  * @qp: the qp
535  *
536  * The qp is inserted at the tail of the rcd
537  * wait queue and the HFI1_S_WAIT_TID_SPACE s_flag is set.
538  *
539  * Must hold the qp s_lock and the exp_lock.
540  */
541 static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd,
542                                   struct tid_queue *queue, struct rvt_qp *qp)
543         __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
544 {
545         struct hfi1_qp_priv *priv = qp->priv;
546
547         lockdep_assert_held(&qp->s_lock);
548         lockdep_assert_held(&rcd->exp_lock);
549         if (list_empty(&priv->tid_wait)) {
550                 qp->s_flags |= HFI1_S_WAIT_TID_SPACE;
551                 list_add_tail(&priv->tid_wait, &queue->queue_head);
552                 priv->tid_enqueue = ++queue->enqueue;
553                 rcd->dd->verbs_dev.n_tidwait++;
554                 trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TID_SPACE);
555                 rvt_get_qp(qp);
556         }
557 }
558
559 /**
560  * __trigger_tid_waiter - trigger tid waiter
561  * @qp: the qp
562  *
563  * This is a private entrance to schedule the qp
564  * assuming the caller is holding the qp->s_lock.
565  */
566 static void __trigger_tid_waiter(struct rvt_qp *qp)
567         __must_hold(&qp->s_lock)
568 {
569         lockdep_assert_held(&qp->s_lock);
570         if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE))
571                 return;
572         trace_hfi1_qpwakeup(qp, HFI1_S_WAIT_TID_SPACE);
573         hfi1_schedule_send(qp);
574 }
575
576 /**
577  * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
578  * @qp - the qp
579  *
580  * trigger a schedule or a waiting qp in a deadlock
581  * safe manner.  The qp reference is held prior
582  * to this call via first_qp().
583  *
584  * If the qp trigger was already scheduled (!rval)
585  * the the reference is dropped, otherwise the resume
586  * or the destroy cancel will dispatch the reference.
587  */
588 static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
589 {
590         struct hfi1_qp_priv *priv;
591         struct hfi1_ibport *ibp;
592         struct hfi1_pportdata *ppd;
593         struct hfi1_devdata *dd;
594         bool rval;
595
596         if (!qp)
597                 return;
598
599         priv = qp->priv;
600         ibp = to_iport(qp->ibqp.device, qp->port_num);
601         ppd = ppd_from_ibp(ibp);
602         dd = dd_from_ibdev(qp->ibqp.device);
603
604         rval = queue_work_on(priv->s_sde ?
605                              priv->s_sde->cpu :
606                              cpumask_first(cpumask_of_node(dd->node)),
607                              ppd->hfi1_wq,
608                              &priv->tid_rdma.trigger_work);
609         if (!rval)
610                 rvt_put_qp(qp);
611 }
612
613 /**
614  * tid_rdma_trigger_resume - field a trigger work request
615  * @work - the work item
616  *
617  * Complete the off qp trigger processing by directly
618  * calling the progress routine.
619  */
620 static void tid_rdma_trigger_resume(struct work_struct *work)
621 {
622         struct tid_rdma_qp_params *tr;
623         struct hfi1_qp_priv *priv;
624         struct rvt_qp *qp;
625
626         tr = container_of(work, struct tid_rdma_qp_params, trigger_work);
627         priv = container_of(tr, struct hfi1_qp_priv, tid_rdma);
628         qp = priv->owner;
629         spin_lock_irq(&qp->s_lock);
630         if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) {
631                 spin_unlock_irq(&qp->s_lock);
632                 hfi1_do_send(priv->owner, true);
633         } else {
634                 spin_unlock_irq(&qp->s_lock);
635         }
636         rvt_put_qp(qp);
637 }
638
639 /**
640  * _tid_rdma_flush_wait - unwind any tid space wait
641  *
642  * This is called when resetting a qp to
643  * allow a destroy or reset to get rid
644  * of any tid space linkage and reference counts.
645  */
646 static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue)
647         __must_hold(&qp->s_lock)
648 {
649         struct hfi1_qp_priv *priv;
650
651         if (!qp)
652                 return;
653         lockdep_assert_held(&qp->s_lock);
654         priv = qp->priv;
655         qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
656         spin_lock(&priv->rcd->exp_lock);
657         if (!list_empty(&priv->tid_wait)) {
658                 list_del_init(&priv->tid_wait);
659                 qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
660                 queue->dequeue++;
661                 rvt_put_qp(qp);
662         }
663         spin_unlock(&priv->rcd->exp_lock);
664 }
665
666 void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
667         __must_hold(&qp->s_lock)
668 {
669         struct hfi1_qp_priv *priv = qp->priv;
670
671         _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue);
672         _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue);
673 }
674
675 /* Flow functions */
676 /**
677  * kern_reserve_flow - allocate a hardware flow
678  * @rcd - the context to use for allocation
679  * @last - the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
680  *         signify "don't care".
681  *
682  * Use a bit mask based allocation to reserve a hardware
683  * flow for use in receiving KDETH data packets. If a preferred flow is
684  * specified the function will attempt to reserve that flow again, if
685  * available.
686  *
687  * The exp_lock must be held.
688  *
689  * Return:
690  * On success: a non-negative value between 0 and RXE_NUM_TID_FLOWS - 1
691  * On failure: -EAGAIN
692  */
693 static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
694         __must_hold(&rcd->exp_lock)
695 {
696         int nr;
697
698         /* Attempt to reserve the preferred flow index */
699         if (last >= 0 && last < RXE_NUM_TID_FLOWS &&
700             !test_and_set_bit(last, &rcd->flow_mask))
701                 return last;
702
703         nr = ffz(rcd->flow_mask);
704         BUILD_BUG_ON(RXE_NUM_TID_FLOWS >=
705                      (sizeof(rcd->flow_mask) * BITS_PER_BYTE));
706         if (nr > (RXE_NUM_TID_FLOWS - 1))
707                 return -EAGAIN;
708         set_bit(nr, &rcd->flow_mask);
709         return nr;
710 }
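/*
 * Example (illustrative): with flow_mask == 0b00101 and last == 2,
 * bit 2 is already taken, so the preferred-index fast path fails and
 * ffz() hands out bit 1. Only when all RXE_NUM_TID_FLOWS bits are set
 * does the function return -EAGAIN.
 */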
711
712 static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation,
713                              u32 flow_idx)
714 {
715         u64 reg;
716
717         reg = ((u64)generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
718                 RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK |
719                 RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK |
720                 RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK |
721                 RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK |
722                 RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK;
723
724         if (generation != KERN_GENERATION_RESERVED)
725                 reg |= RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK;
726
727         write_uctxt_csr(rcd->dd, rcd->ctxt,
728                         RCV_TID_FLOW_TABLE + 8 * flow_idx, reg);
729 }
730
731 static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
732         __must_hold(&rcd->exp_lock)
733 {
734         u32 generation = rcd->flows[flow_idx].generation;
735
736         kern_set_hw_flow(rcd, generation, flow_idx);
737         return generation;
738 }
739
740 static u32 kern_flow_generation_next(u32 gen)
741 {
742         u32 generation = mask_generation(gen + 1);
743
744         if (generation == KERN_GENERATION_RESERVED)
745                 generation = mask_generation(generation + 1);
746         return generation;
747 }
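/*
 * Example: gen == 0xFFFFE advances to 0xFFFFF, which is
 * KERN_GENERATION_RESERVED, so the function skips ahead and returns
 * mask_generation(0x100000) == 0; the reserved value is never handed
 * out as a live generation.
 */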
748
749 static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
750         __must_hold(&rcd->exp_lock)
751 {
752         rcd->flows[flow_idx].generation =
753                 kern_flow_generation_next(rcd->flows[flow_idx].generation);
754         kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx);
755 }
756
757 int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
758 {
759         struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
760         struct tid_flow_state *fs = &qpriv->flow_state;
761         struct rvt_qp *fqp;
762         unsigned long flags;
763         int ret = 0;
764
765         /* The QP already has an allocated flow */
766         if (fs->index != RXE_NUM_TID_FLOWS)
767                 return ret;
768
769         spin_lock_irqsave(&rcd->exp_lock, flags);
770         if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp))
771                 goto queue;
772
773         ret = kern_reserve_flow(rcd, fs->last_index);
774         if (ret < 0)
775                 goto queue;
776         fs->index = ret;
777         fs->last_index = fs->index;
778
779         /* Generation received in a RESYNC overrides default flow generation */
780         if (fs->generation != KERN_GENERATION_RESERVED)
781                 rcd->flows[fs->index].generation = fs->generation;
782         fs->generation = kern_setup_hw_flow(rcd, fs->index);
783         fs->psn = 0;
784         dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
785         /* get head before dropping lock */
786         fqp = first_qp(rcd, &rcd->flow_queue);
787         spin_unlock_irqrestore(&rcd->exp_lock, flags);
788
789         tid_rdma_schedule_tid_wakeup(fqp);
790         return 0;
791 queue:
792         queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
793         spin_unlock_irqrestore(&rcd->exp_lock, flags);
794         return -EAGAIN;
795 }
796
797 void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
798 {
799         struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
800         struct tid_flow_state *fs = &qpriv->flow_state;
801         struct rvt_qp *fqp;
802         unsigned long flags;
803
804         if (fs->index >= RXE_NUM_TID_FLOWS)
805                 return;
806         spin_lock_irqsave(&rcd->exp_lock, flags);
807         kern_clear_hw_flow(rcd, fs->index);
808         clear_bit(fs->index, &rcd->flow_mask);
809         fs->index = RXE_NUM_TID_FLOWS;
810         fs->psn = 0;
811         fs->generation = KERN_GENERATION_RESERVED;
812
813         /* get head before dropping lock */
814         fqp = first_qp(rcd, &rcd->flow_queue);
815         spin_unlock_irqrestore(&rcd->exp_lock, flags);
816
817         if (fqp == qp) {
818                 __trigger_tid_waiter(fqp);
819                 rvt_put_qp(fqp);
820         } else {
821                 tid_rdma_schedule_tid_wakeup(fqp);
822         }
823 }
824
825 void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
826 {
827         int i;
828
829         for (i = 0; i < RXE_NUM_TID_FLOWS; i++) {
830                 rcd->flows[i].generation = mask_generation(prandom_u32());
831                 kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
832         }
833 }
834
835 /* TID allocation functions */
836 static u8 trdma_pset_order(struct tid_rdma_pageset *s)
837 {
838         u8 count = s->count;
839
840         return ilog2(count) + 1;
841 }
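/*
 * Pageset counts produced by the routines below are powers of two, so
 * this maps count 1 -> order 1, 2 -> 2, 4 -> 3, and so on: the
 * log2-plus-one buffer-size encoding passed as the last argument to
 * hfi1_put_tid() in kern_program_rcv_group().
 */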
842
843 /**
844  * tid_rdma_find_phys_blocks_4k - get groups based on mr info
845  * @pages - pointer to an array of page structs
846  * @npages - number of pages
847  * @list - page set array to return
848  *
849  * This routine returns the number of groups associated with
850  * the current sge information.  This implementation is based
851  * on the expected receive find_phys_blocks() adjusted to
852  * use the MR information vs. the pfn.
853  *
854  * Return:
855  * the number of RcvArray entries
856  */
857 static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
858                                         struct page **pages,
859                                         u32 npages,
860                                         struct tid_rdma_pageset *list)
861 {
862         u32 pagecount, pageidx, setcount = 0, i;
863         void *vaddr, *this_vaddr;
864
865         if (!npages)
866                 return 0;
867
868         /*
869          * Look for sets of physically contiguous pages in the user buffer.
870          * This will allow us to optimize Expected RcvArray entry usage by
871          * using the bigger supported sizes.
872          */
873         vaddr = page_address(pages[0]);
874         trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
875         for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
876                 this_vaddr = i < npages ? page_address(pages[i]) : NULL;
877                 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
878                                          this_vaddr);
879                 /*
880                  * If the vaddr's are not sequential, pages are not physically
881                  * contiguous.
882                  */
883                 if (this_vaddr != (vaddr + PAGE_SIZE)) {
884                         /*
885                          * At this point we have to loop over the set of
886                          * physically contiguous pages and break them down into
887                          * sizes supported by the HW.
888                          * There are two main constraints:
889                          *     1. The max buffer size is MAX_EXPECTED_BUFFER.
890                          *        If the total set size is bigger than that
891                          *        program only a MAX_EXPECTED_BUFFER chunk.
892                          *     2. The buffer size has to be a power of two. If
893                          *        it is not, round down to the closest power of
894                          *        2 and program that size.
895                          */
896                         while (pagecount) {
897                                 int maxpages = pagecount;
898                                 u32 bufsize = pagecount * PAGE_SIZE;
899
900                                 if (bufsize > MAX_EXPECTED_BUFFER)
901                                         maxpages =
902                                                 MAX_EXPECTED_BUFFER >>
903                                                 PAGE_SHIFT;
904                                 else if (!is_power_of_2(bufsize))
905                                         maxpages =
906                                                 rounddown_pow_of_two(bufsize) >>
907                                                 PAGE_SHIFT;
908
909                                 list[setcount].idx = pageidx;
910                                 list[setcount].count = maxpages;
911                                 trace_hfi1_tid_pageset(flow->req->qp, setcount,
912                                                        list[setcount].idx,
913                                                        list[setcount].count);
914                                 pagecount -= maxpages;
915                                 pageidx += maxpages;
916                                 setcount++;
917                         }
918                         pageidx = i;
919                         pagecount = 1;
920                         vaddr = this_vaddr;
921                 } else {
922                         vaddr += PAGE_SIZE;
923                         pagecount++;
924                 }
925         }
926         /* ensure we always return an even number of sets */
927         if (setcount & 1)
928                 list[setcount++].count = 0;
929         return setcount;
930 }
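/*
 * Worked example (illustrative, 4K pages, MAX_EXPECTED_BUFFER of
 * 2 MiB): a physically contiguous run of 13 pages (52K, not a power
 * of two) is emitted as pagesets of 8, 4 and 1 pages, each pass
 * programming the largest power-of-two chunk that fits, plus one
 * zero-length filler at the end to keep the set count even.
 */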
931
932 /**
933  * tid_flush_pages - dump out pages into pagesets
934  * @list - list of pagesets
935  * @idx - pointer to current page index
936  * @pages - number of pages to dump
937  * @sets - current number of pagesets
938  *
939  * This routine flushes out accumulated pages.
940  *
941  * To ensure an even number of sets the
942  * code may add a filler.
943  *
944  * This can happen when pages is not
945  * a power of 2, or is a power of 2
946  * less than the maximum pages.
947  *
948  * Return:
949  * The new number of sets
950  */
951
952 static u32 tid_flush_pages(struct tid_rdma_pageset *list,
953                            u32 *idx, u32 pages, u32 sets)
954 {
955         while (pages) {
956                 u32 maxpages = pages;
957
958                 if (maxpages > MAX_EXPECTED_PAGES)
959                         maxpages = MAX_EXPECTED_PAGES;
960                 else if (!is_power_of_2(maxpages))
961                         maxpages = rounddown_pow_of_two(maxpages);
962                 list[sets].idx = *idx;
963                 list[sets++].count = maxpages;
964                 *idx += maxpages;
965                 pages -= maxpages;
966         }
967         /* might need a filler */
968         if (sets & 1)
969                 list[sets++].count = 0;
970         return sets;
971 }
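/*
 * Example: flushing 11 accumulated pages yields sets of 8, 2 and 1
 * pages (each the largest power of two that fits), and the odd set
 * count then gets a zero-length filler, returning 4 sets in total.
 */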
972
973 /**
974  * tid_rdma_find_phys_blocks_8k - get groups based on mr info
975  * @pages - pointer to an array of page structs
976  * @npages - number of pages
977  * @list - page set array to return
978  *
979  * This routine parses an array of pages to compute pagesets
980  * in an 8k compatible way.
981  *
982  * Pages are tested two at a time: i and i + 1 for
983  * contiguity, and then i - 1 and i.
984  *
985  * If any condition is false, any accumulated pages are flushed and
986  * v0,v1 are emitted as separate PAGE_SIZE pagesets
987  *
988  * Otherwise, the current 8k is totaled for a future flush.
989  *
990  * Return:
991  * The number of pagesets
992  * list set with the returned number of pagesets
993  *
994  */
995 static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
996                                         struct page **pages,
997                                         u32 npages,
998                                         struct tid_rdma_pageset *list)
999 {
1000         u32 idx, sets = 0, i;
1001         u32 pagecnt = 0;
1002         void *v0, *v1, *vm1;
1003
1004         if (!npages)
1005                 return 0;
1006         for (idx = 0, i = 0, vm1 = NULL; i < npages; i += 2) {
1007                 /* get a new v0 */
1008                 v0 = page_address(pages[i]);
1009                 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
1010                 v1 = i + 1 < npages ?
1011                                 page_address(pages[i + 1]) : NULL;
1012                 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
1013                 /* compare i, i + 1 vaddr */
1014                 if (v1 != (v0 + PAGE_SIZE)) {
1015                         /* flush out pages */
1016                         sets = tid_flush_pages(list, &idx, pagecnt, sets);
1017                         /* output v0,v1 as two pagesets */
1018                         list[sets].idx = idx++;
1019                         list[sets++].count = 1;
1020                         if (v1) {
1021                                 list[sets].count = 1;
1022                                 list[sets++].idx = idx++;
1023                         } else {
1024                                 list[sets++].count = 0;
1025                         }
1026                         vm1 = NULL;
1027                         pagecnt = 0;
1028                         continue;
1029                 }
1030                 /* i,i+1 consecutive, look at i-1,i */
1031                 if (vm1 && v0 != (vm1 + PAGE_SIZE)) {
1032                         /* flush out pages */
1033                         sets = tid_flush_pages(list, &idx, pagecnt, sets);
1034                         pagecnt = 0;
1035                 }
1036                 /* pages will always be a multiple of 8k */
1037                 pagecnt += 2;
1038                 /* save i-1 */
1039                 vm1 = v1;
1040                 /* move to next pair */
1041         }
1042         /* dump residual pages at end */
1043         sets = tid_flush_pages(list, &idx, npages - idx, sets);
1044         /* by design cannot be odd sets */
1045         WARN_ON(sets & 1);
1046         return sets;
1047 }
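/*
 * Example (illustrative): four pages where pages 0-1 and 2-3 are
 * contiguous pairs but page 2 does not follow page 1. The i-1/i test
 * fires at i == 2 and flushes the first pair as a 2-page set plus a
 * filler; the residual flush then emits the second pair the same way,
 * for an even total of four sets.
 */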
1048
1049 /**
1050  * Find pages for one segment of a sge array represented by @ss. The function
1051  * does not check the sge, the sge must have been checked for alignment with a
1052  * prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of
1053  * rvt_lkey_ok and rvt_rkey_ok. Also, the function only modifies the local sge
1054  * copy maintained in @ss->sge, the original sge is not modified.
1055  *
1056  * Unlike IB RDMA WRITE, we can't decrement ss->num_sge here because we are not
1057  * releasing the MR reference count at the same time. Otherwise, we'll "leak"
1058  * references to the MR. This difference requires that we keep track of progress
1059  * into the sg_list. This is done by the isge cursor in the tid_rdma_request
1060  * structure.
1061  */
1062 static u32 kern_find_pages(struct tid_rdma_flow *flow,
1063                            struct page **pages,
1064                            struct rvt_sge_state *ss, bool *last)
1065 {
1066         struct tid_rdma_request *req = flow->req;
1067         struct rvt_sge *sge = &ss->sge;
1068         u32 length = flow->req->seg_len;
1069         u32 len = PAGE_SIZE;
1070         u32 i = 0;
1071
1072         while (length && req->isge < ss->num_sge) {
1073                 pages[i++] = virt_to_page(sge->vaddr);
1074
1075                 sge->vaddr += len;
1076                 sge->length -= len;
1077                 sge->sge_length -= len;
1078                 if (!sge->sge_length) {
1079                         if (++req->isge < ss->num_sge)
1080                                 *sge = ss->sg_list[req->isge - 1];
1081                 } else if (sge->length == 0 && sge->mr->lkey) {
1082                         if (++sge->n >= RVT_SEGSZ) {
1083                                 ++sge->m;
1084                                 sge->n = 0;
1085                         }
1086                         sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
1087                         sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
1088                 }
1089                 length -= len;
1090         }
1091
1092         flow->length = flow->req->seg_len - length;
1093         *last = req->isge != ss->num_sge;
1094         return i;
1095 }
1096
1097 static void dma_unmap_flow(struct tid_rdma_flow *flow)
1098 {
1099         struct hfi1_devdata *dd;
1100         int i;
1101         struct tid_rdma_pageset *pset;
1102
1103         dd = flow->req->rcd->dd;
1104         for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
1105                         i++, pset++) {
1106                 if (pset->count && pset->addr) {
1107                         dma_unmap_page(&dd->pcidev->dev,
1108                                        pset->addr,
1109                                        PAGE_SIZE * pset->count,
1110                                        DMA_FROM_DEVICE);
1111                         pset->mapped = 0;
1112                 }
1113         }
1114 }
1115
1116 static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages)
1117 {
1118         int i;
1119         struct hfi1_devdata *dd = flow->req->rcd->dd;
1120         struct tid_rdma_pageset *pset;
1121
1122         for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
1123                         i++, pset++) {
1124                 if (pset->count) {
1125                         pset->addr = dma_map_page(&dd->pcidev->dev,
1126                                                   pages[pset->idx],
1127                                                   0,
1128                                                   PAGE_SIZE * pset->count,
1129                                                   DMA_FROM_DEVICE);
1130
1131                         if (dma_mapping_error(&dd->pcidev->dev, pset->addr)) {
1132                                 dma_unmap_flow(flow);
1133                                 return -ENOMEM;
1134                         }
1135                         pset->mapped = 1;
1136                 }
1137         }
1138         return 0;
1139 }
1140
1141 static inline bool dma_mapped(struct tid_rdma_flow *flow)
1142 {
1143         return !!flow->pagesets[0].mapped;
1144 }
1145
1146 /*
1147  * Get pages pointers and identify contiguous physical memory chunks for a
1148  * segment. All segments are of length flow->req->seg_len.
1149  */
1150 static int kern_get_phys_blocks(struct tid_rdma_flow *flow,
1151                                 struct page **pages,
1152                                 struct rvt_sge_state *ss, bool *last)
1153 {
1154         u8 npages;
1155
1156         /* Reuse previously computed pagesets, if any */
1157         if (flow->npagesets) {
1158                 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
1159                                           flow);
1160                 if (!dma_mapped(flow))
1161                         return dma_map_flow(flow, pages);
1162                 return 0;
1163         }
1164
1165         npages = kern_find_pages(flow, pages, ss, last);
1166
1167         if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
1168                 flow->npagesets =
1169                         tid_rdma_find_phys_blocks_4k(flow, pages, npages,
1170                                                      flow->pagesets);
1171         else
1172                 flow->npagesets =
1173                         tid_rdma_find_phys_blocks_8k(flow, pages, npages,
1174                                                      flow->pagesets);
1175
1176         return dma_map_flow(flow, pages);
1177 }
1178
1179 static inline void kern_add_tid_node(struct tid_rdma_flow *flow,
1180                                      struct hfi1_ctxtdata *rcd, char *s,
1181                                      struct tid_group *grp, u8 cnt)
1182 {
1183         struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++];
1184
1185         WARN_ON_ONCE(flow->tnode_cnt >=
1186                      (TID_RDMA_MAX_SEGMENT_SIZE >> PAGE_SHIFT));
1187         if (WARN_ON_ONCE(cnt & 1))
1188                 dd_dev_err(rcd->dd,
1189                            "unexpected odd allocation cnt %u map 0x%x used %u",
1190                            cnt, grp->map, grp->used);
1191
1192         node->grp = grp;
1193         node->map = grp->map;
1194         node->cnt = cnt;
1195         trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
1196                                 grp->base, grp->map, grp->used, cnt);
1197 }
1198
1199 /*
1200  * Try to allocate pageset_count TID's from TID groups for a context
1201  *
1202  * This function allocates TID's without moving groups between lists or
1203  * modifying grp->map. This is done as follows, being cognizant of the lists
1204  * between which the TID groups will move:
1205  * 1. First allocate complete groups of 8 TID's since this is more efficient,
1206  *    these groups will move from group->full without affecting used
1207  * 2. If more TID's are needed allocate from used (will move from used->full or
1208  *    stay in used)
1209  * 3. If we still don't have the required number of TID's go back and look again
1210  *    at a complete group (will move from group->used)
1211  */
1212 static int kern_alloc_tids(struct tid_rdma_flow *flow)
1213 {
1214         struct hfi1_ctxtdata *rcd = flow->req->rcd;
1215         struct hfi1_devdata *dd = rcd->dd;
1216         u32 ngroups, pageidx = 0;
1217         struct tid_group *group = NULL, *used;
1218         u8 use;
1219
1220         flow->tnode_cnt = 0;
1221         ngroups = flow->npagesets / dd->rcv_entries.group_size;
1222         if (!ngroups)
1223                 goto used_list;
1224
1225         /* First look at complete groups */
1226         list_for_each_entry(group,  &rcd->tid_group_list.list, list) {
1227                 kern_add_tid_node(flow, rcd, "complete groups", group,
1228                                   group->size);
1229
1230                 pageidx += group->size;
1231                 if (!--ngroups)
1232                         break;
1233         }
1234
1235         if (pageidx >= flow->npagesets)
1236                 goto ok;
1237
1238 used_list:
1239         /* Now look at partially used groups */
1240         list_for_each_entry(used, &rcd->tid_used_list.list, list) {
1241                 use = min_t(u32, flow->npagesets - pageidx,
1242                             used->size - used->used);
1243                 kern_add_tid_node(flow, rcd, "used groups", used, use);
1244
1245                 pageidx += use;
1246                 if (pageidx >= flow->npagesets)
1247                         goto ok;
1248         }
1249
1250         /*
1251          * Look again at a complete group, continuing from where we left.
1252          * However, if we are at the head, we have reached the end of the
1253          * complete groups list from the first loop above
1254          */
1255         if (group && &group->list == &rcd->tid_group_list.list)
1256                 goto bail_eagain;
1257         group = list_prepare_entry(group, &rcd->tid_group_list.list,
1258                                    list);
1259         if (list_is_last(&group->list, &rcd->tid_group_list.list))
1260                 goto bail_eagain;
1261         group = list_next_entry(group, list);
1262         use = min_t(u32, flow->npagesets - pageidx, group->size);
1263         kern_add_tid_node(flow, rcd, "complete continue", group, use);
1264         pageidx += use;
1265         if (pageidx >= flow->npagesets)
1266                 goto ok;
1267 bail_eagain:
1268         trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
1269                                   (u64)flow->npagesets);
1270         return -EAGAIN;
1271 ok:
1272         return 0;
1273 }
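/*
 * Example: npagesets == 10 with a group size of 8 gives ngroups == 1,
 * so one complete group contributes 8 entries and the remaining 2 are
 * taken from a partially used group. Only if all three passes together
 * still fall short does the function bail with -EAGAIN.
 */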
1274
1275 static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num,
1276                                    u32 *pset_idx)
1277 {
1278         struct hfi1_ctxtdata *rcd = flow->req->rcd;
1279         struct hfi1_devdata *dd = rcd->dd;
1280         struct kern_tid_node *node = &flow->tnode[grp_num];
1281         struct tid_group *grp = node->grp;
1282         struct tid_rdma_pageset *pset;
1283         u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
1284         u32 rcventry, npages = 0, pair = 0, tidctrl;
1285         u8 i, cnt = 0;
1286
1287         for (i = 0; i < grp->size; i++) {
1288                 rcventry = grp->base + i;
1289
1290                 if (node->map & BIT(i) || cnt >= node->cnt) {
1291                         rcv_array_wc_fill(dd, rcventry);
1292                         continue;
1293                 }
1294                 pset = &flow->pagesets[(*pset_idx)++];
1295                 if (pset->count) {
1296                         hfi1_put_tid(dd, rcventry, PT_EXPECTED,
1297                                      pset->addr, trdma_pset_order(pset));
1298                 } else {
1299                         hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
1300                 }
1301                 npages += pset->count;
1302
1303                 rcventry -= rcd->expected_base;
1304                 tidctrl = pair ? 0x3 : rcventry & 0x1 ? 0x2 : 0x1;
1305                 /*
1306                  * A single TID entry will be used to map a rcvarray pair (with
1307                  * tidctrl 0x3), if ALL these are true (a) the bit pos is even
1308                  * (b) the group map shows current and the next bits as free
1309                  * indicating two consecutive rcvarray entries are available (c)
1310                  * we actually need 2 more entries
1311                  */
1312                 pair = !(i & 0x1) && !((node->map >> i) & 0x3) &&
1313                         node->cnt >= cnt + 2;
1314                 if (!pair) {
1315                         if (!pset->count)
1316                                 tidctrl = 0x1;
1317                         flow->tid_entry[flow->tidcnt++] =
1318                                 EXP_TID_SET(IDX, rcventry >> 1) |
1319                                 EXP_TID_SET(CTRL, tidctrl) |
1320                                 EXP_TID_SET(LEN, npages);
1321                         trace_hfi1_tid_entry_alloc(/* entry */
1322                            flow->req->qp, flow->tidcnt - 1,
1323                            flow->tid_entry[flow->tidcnt - 1]);
1324
1325                         /* Efficient DIV_ROUND_UP(npages, pmtu_pg) */
1326                         flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg);
1327                         npages = 0;
1328                 }
1329
1330                 if (grp->used == grp->size - 1)
1331                         tid_group_move(grp, &rcd->tid_used_list,
1332                                        &rcd->tid_full_list);
1333                 else if (!grp->used)
1334                         tid_group_move(grp, &rcd->tid_group_list,
1335                                        &rcd->tid_used_list);
1336
1337                 grp->used++;
1338                 grp->map |= BIT(i);
1339                 cnt++;
1340         }
1341 }
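/*
 * tidctrl example: if the rcventry at an even offset and its odd
 * neighbor are both free and at least two more entries are needed, a
 * single software TID is emitted with CTRL == 0x3 covering the pair;
 * otherwise a lone even entry gets CTRL == 0x1 and a lone odd entry
 * CTRL == 0x2, exactly as computed above.
 */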
1342
1343 static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num)
1344 {
1345         struct hfi1_ctxtdata *rcd = flow->req->rcd;
1346         struct hfi1_devdata *dd = rcd->dd;
1347         struct kern_tid_node *node = &flow->tnode[grp_num];
1348         struct tid_group *grp = node->grp;
1349         u32 rcventry;
1350         u8 i, cnt = 0;
1351
1352         for (i = 0; i < grp->size; i++) {
1353                 rcventry = grp->base + i;
1354
1355                 if (node->map & BIT(i) || cnt >= node->cnt) {
1356                         rcv_array_wc_fill(dd, rcventry);
1357                         continue;
1358                 }
1359
1360                 hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
1361
1362                 grp->used--;
1363                 grp->map &= ~BIT(i);
1364                 cnt++;
1365
1366                 if (grp->used == grp->size - 1)
1367                         tid_group_move(grp, &rcd->tid_full_list,
1368                                        &rcd->tid_used_list);
1369                 else if (!grp->used)
1370                         tid_group_move(grp, &rcd->tid_used_list,
1371                                        &rcd->tid_group_list);
1372         }
1373         if (WARN_ON_ONCE(cnt & 1)) {
1374                 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1375                 struct hfi1_devdata *dd = rcd->dd;
1376
1377                 dd_dev_err(dd, "unexpected odd free cnt %u map 0x%x used %u",
1378                            cnt, grp->map, grp->used);
1379         }
1380 }
1381
1382 static void kern_program_rcvarray(struct tid_rdma_flow *flow)
1383 {
1384         u32 pset_idx = 0;
1385         int i;
1386
1387         flow->npkts = 0;
1388         flow->tidcnt = 0;
1389         for (i = 0; i < flow->tnode_cnt; i++)
1390                 kern_program_rcv_group(flow, i, &pset_idx);
1391         trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
1392 }
1393
1394 /**
1395  * hfi1_kern_exp_rcv_setup() - setup TID's and flow for one segment of a
1396  * TID RDMA request
1397  *
1398  * @req: TID RDMA request for which the segment/flow is being set up
1399  * @ss: sge state, maintains state across successive segments of a sge
1400  * @last: set to true after the last sge segment has been processed
1401  *
1402  * This function
1403  * (1) finds a free flow entry in the flow circular buffer
1404  * (2) finds pages and contiguous physical chunks constituting one segment
1405  *     of an sge
1406  * (3) allocates TID group entries for those chunks
1407  * (4) programs rcvarray entries in the hardware corresponding to those
1408  *     TID's
1409  * (5) computes a tidarray with formatted TID entries which can be sent
1410  *     to the sender
1411  * (6) Reserves and programs HW flows.
1412  * (7) It also manages queing the QP when TID/flow resources are not
1413  *     available.
1414  *
1415  * @req points to struct tid_rdma_request of which the segments are a part. The
1416  * function uses qp, rcd and seg_len members of @req. In the absence of errors,
1417  * req->flow_idx is the index of the flow which has been prepared in this
1418  * invocation of function call. With flow = &req->flows[req->flow_idx],
1419  * flow->tid_entry contains the TID array which the sender can use for TID RDMA
1420  * sends and flow->npkts contains number of packets required to send the
1421  * segment.
1422  *
1423  * hfi1_check_sge_align should be called prior to calling this function and if
1424  * it signals error TID RDMA cannot be used for this sge and this function
1425  * should not be called.
1426  *
1427  * For the queuing, caller must hold the flow->req->qp s_lock from the send
1428  * engine and the function will procure the exp_lock.
1429  *
1430  * Return:
1431  * The function returns -EAGAIN if sufficient number of TID/flow resources to
1432  * map the segment could not be allocated. In this case the function should be
1433  * called again with previous arguments to retry the TID allocation. There are
1434  * no other error returns. The function returns 0 on success.
1435  */
1436 int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
1437                             struct rvt_sge_state *ss, bool *last)
1438         __must_hold(&req->qp->s_lock)
1439 {
1440         struct tid_rdma_flow *flow = &req->flows[req->setup_head];
1441         struct hfi1_ctxtdata *rcd = req->rcd;
1442         struct hfi1_qp_priv *qpriv = req->qp->priv;
1443         unsigned long flags;
1444         struct rvt_qp *fqp;
1445         u16 clear_tail = req->clear_tail;
1446
1447         lockdep_assert_held(&req->qp->s_lock);
1448         /*
1449          * We return error if either (a) we don't have space in the flow
1450          * circular buffer, or (b) we already have max entries in the buffer.
1451          * Max entries depend on the type of request we are processing and the
1452          * negotiated TID RDMA parameters.
1453          */
1454         if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) ||
1455             CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >=
1456             req->n_flows)
1457                 return -EINVAL;
1458
1459         /*
1460          * Get pages and identify contiguous physical memory chunks for the
1461          * segment. If we cannot determine a DMA address mapping, we treat it
1462          * just as if we had run out of space above.
1463          */
1464         if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) {
1465                 hfi1_wait_kmem(flow->req->qp);
1466                 return -ENOMEM;
1467         }
1468
1469         spin_lock_irqsave(&rcd->exp_lock, flags);
1470         if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
1471                 goto queue;
1472
1473         /*
1474          * At this point we know the number of pagesets and hence the number of
1475          * TID's to map the segment. Allocate the TID's from the TID groups. If
1476          * we cannot allocate the required number, we exit and try again later.
1477          */
1478         if (kern_alloc_tids(flow))
1479                 goto queue;
1480         /*
1481          * Finally program the TID entries with the pagesets, compute the
1482          * tidarray and enable the HW flow
1483          */
1484         kern_program_rcvarray(flow);
1485
1486         /*
1487          * Set up the flow state with the relevant information.
1488          * This information is used for tracking the sequence of data packets
1489          * for the segment.
1490          * The flow is set up here as this is the most accurate time and place
1491          * to do so. Doing so at a later time runs the risk of the flow data in
1492          * qpriv getting out of sync.
1493          */
1494         memset(&flow->flow_state, 0x0, sizeof(flow->flow_state));
1495         flow->idx = qpriv->flow_state.index;
1496         flow->flow_state.generation = qpriv->flow_state.generation;
1497         flow->flow_state.spsn = qpriv->flow_state.psn;
1498         flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1;
1499         flow->flow_state.r_next_psn =
1500                 full_flow_psn(flow, flow->flow_state.spsn);
1501         qpriv->flow_state.psn += flow->npkts;
1502
1503         dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
1504         /* get head before dropping lock */
1505         fqp = first_qp(rcd, &rcd->rarr_queue);
1506         spin_unlock_irqrestore(&rcd->exp_lock, flags);
1507         tid_rdma_schedule_tid_wakeup(fqp);
1508
1509         req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
1510         return 0;
1511 queue:
1512         queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
1513         spin_unlock_irqrestore(&rcd->exp_lock, flags);
1514         return -EAGAIN;
1515 }
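
/*
 * A minimal caller sketch for hfi1_kern_exp_rcv_setup() (illustrative
 * only, not a call site in this file), assuming qp->s_lock is already
 * held by the send engine:
 *
 *	bool last;
 *	int ret;
 *
 *	ret = hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last);
 *	if (ret == -EAGAIN)
 *		return;
 *	if (ret)
 *		goto bail;
 *
 * On -EAGAIN the QP has already been queued on rcd->rarr_queue, so the
 * call is simply repeated later with the same arguments. On success,
 * req->flows[req->flow_idx].tid_entry[] and flow->npkts are valid for
 * building the TID RDMA request.
 */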
1516
1517 static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow)
1518 {
1519         flow->npagesets = 0;
1520 }
1521
1522 /*
1523  * This function is called after one segment has been successfully sent to
1524  * release the flow and TID HW/SW resources for that segment. The segments for a
1525  * TID RDMA request are setup and cleared in FIFO order which is managed using a
1526  * circular buffer.
1527  */
1528 int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req)
1529         __must_hold(&req->qp->s_lock)
1530 {
1531         struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
1532         struct hfi1_ctxtdata *rcd = req->rcd;
1533         unsigned long flags;
1534         int i;
1535         struct rvt_qp *fqp;
1536
1537         lockdep_assert_held(&req->qp->s_lock);
1538         /* Exit if we have nothing in the flow circular buffer */
1539         if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS))
1540                 return -EINVAL;
1541
1542         spin_lock_irqsave(&rcd->exp_lock, flags);
1543
1544         for (i = 0; i < flow->tnode_cnt; i++)
1545                 kern_unprogram_rcv_group(flow, i);
1546         /* To prevent double unprogramming */
1547         flow->tnode_cnt = 0;
1548         /* get head before dropping lock */
1549         fqp = first_qp(rcd, &rcd->rarr_queue);
1550         spin_unlock_irqrestore(&rcd->exp_lock, flags);
1551
1552         dma_unmap_flow(flow);
1553
1554         hfi1_tid_rdma_reset_flow(flow);
1555         req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1);
1556
1557         if (fqp == req->qp) {
1558                 __trigger_tid_waiter(fqp);
1559                 rvt_put_qp(fqp);
1560         } else {
1561                 tid_rdma_schedule_tid_wakeup(fqp);
1562         }
1563
1564         return 0;
1565 }
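
/*
 * Index arithmetic sketch for the flow ring (illustrative), assuming
 * MAX_FLOWS = 8: with setup_head = 2 and clear_tail = 7,
 * CIRC_CNT(2, 7, 8) = (2 - 7) & 7 = 3 segments are outstanding. The
 * next hfi1_kern_exp_rcv_clear() releases req->flows[7] and advances
 * clear_tail to (7 + 1) & 7 = 0, preserving the FIFO order described
 * above.
 */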
1566
1567 /*
1568  * This function is called to release all the tid entries for
1569  * a request.
1570  */
1571 void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
1572         __must_hold(&req->qp->s_lock)
1573 {
1574         /* Use memory barrier for proper ordering */
1575         while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) {
1576                 if (hfi1_kern_exp_rcv_clear(req))
1577                         break;
1578         }
1579 }
1580
1581 /**
1582  * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
1583  * @req: the tid rdma request to be cleaned
1584  */
1585 static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
1586 {
1587         kfree(req->flows);
1588         req->flows = NULL;
1589 }
1590
1591 /**
1592  * __trdma_clean_swqe - clean up for large sized QPs
1593  * @qp: the queue pair
1594  * @wqe: the send wqe
1595  */
1596 void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
1597 {
1598         struct hfi1_swqe_priv *p = wqe->priv;
1599
1600         hfi1_kern_exp_rcv_free_flows(&p->tid_req);
1601 }
1602
1603 /*
1604  * This can be called at QP create time or in the data path.
1605  */
1606 static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
1607                                          gfp_t gfp)
1608 {
1609         struct tid_rdma_flow *flows;
1610         int i;
1611
1612         if (likely(req->flows))
1613                 return 0;
1614         flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp,
1615                              req->rcd->numa_id);
1616         if (!flows)
1617                 return -ENOMEM;
1618         /* mini init: the remaining fields are set when the flow is set up */
1619         for (i = 0; i < MAX_FLOWS; i++) {
1620                 flows[i].req = req;
1621                 flows[i].npagesets = 0;
1622                 flows[i].pagesets[0].mapped =  0;
1623         }
1624         req->flows = flows;
1625         return 0;
1626 }
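
/*
 * Call-site sketch (illustrative; the gfp values are assumptions based
 * on the comment above): an allocation at QP create time may sleep,
 * while one in the data path may not, so the callers would differ only
 * in the gfp argument:
 *
 *	hfi1_kern_exp_rcv_alloc_flows(req, GFP_KERNEL);	at create time
 *	hfi1_kern_exp_rcv_alloc_flows(req, GFP_ATOMIC);	in the data path
 */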
1627
1628 static void hfi1_init_trdma_req(struct rvt_qp *qp,
1629                                 struct tid_rdma_request *req)
1630 {
1631         struct hfi1_qp_priv *qpriv = qp->priv;
1632
1633         /*
1634          * Initialize various TID RDMA request variables.
1635          * These variables are "static", which is why they
1636          * can be pre-initialized here before the WRs have
1637          * even been submitted.
1638          * However, non-NULL values for these variables do not
1639          * imply that this WQE has been enabled for TID RDMA.
1640          * Drivers should check the WQE's opcode to determine
1641          * if a request is a TID RDMA one or not.
1642          */
1643         req->qp = qp;
1644         req->rcd = qpriv->rcd;
1645 }
1646
1647 u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
1648                             void *context, int vl, int mode, u64 data)
1649 {
1650         struct hfi1_devdata *dd = context;
1651
1652         return dd->verbs_dev.n_tidwait;
1653 }
1654
1655 static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
1656                                           u32 psn, u16 *fidx)
1657 {
1658         u16 head, tail;
1659         struct tid_rdma_flow *flow;
1660
1661         head = req->setup_head;
1662         tail = req->clear_tail;
1663         for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
1664              tail = CIRC_NEXT(tail, MAX_FLOWS)) {
1665                 flow = &req->flows[tail];
1666                 if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 &&
1667                     cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) {
1668                         if (fidx)
1669                                 *fidx = tail;
1670                         return flow;
1671                 }
1672         }
1673         return NULL;
1674 }
1675
1676 static struct tid_rdma_flow *
1677 __find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
1678                    u32 psn, u16 *fidx)
1679 {
1680         for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
1681               tail = CIRC_NEXT(tail, MAX_FLOWS)) {
1682                 struct tid_rdma_flow *flow = &req->flows[tail];
1683                 u32 spsn, lpsn;
1684
1685                 spsn = full_flow_psn(flow, flow->flow_state.spsn);
1686                 lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
1687
1688                 if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
1689                         if (fidx)
1690                                 *fidx = tail;
1691                         return flow;
1692                 }
1693         }
1694         return NULL;
1695 }
1696
1697 static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
1698                                        u32 psn, u16 *fidx)
1699 {
1700         return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
1701                                   fidx);
1702 }
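
/*
 * Worked example of the flow PSN matching above (illustrative),
 * assuming HFI1_KDETH_BTH_SEQ_SHIFT == 11: full_flow_psn() combines
 * the generation and the 11-bit KDETH sequence number, so with
 * generation = 3, spsn = 10 and npkts = 4 a flow covers full PSNs
 * (3 << 11) + 10 = 6154 through 6157, and __find_flow_ranged() matches
 * any psn inside that window.
 */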
1703
1704 /* TID RDMA READ functions */
1705 u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
1706                                     struct ib_other_headers *ohdr, u32 *bth1,
1707                                     u32 *bth2, u32 *len)
1708 {
1709         struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1710         struct tid_rdma_flow *flow = &req->flows[req->flow_idx];
1711         struct rvt_qp *qp = req->qp;
1712         struct hfi1_qp_priv *qpriv = qp->priv;
1713         struct hfi1_swqe_priv *wpriv = wqe->priv;
1714         struct tid_rdma_read_req *rreq = &ohdr->u.tid_rdma.r_req;
1715         struct tid_rdma_params *remote;
1716         u32 req_len = 0;
1717         void *req_addr = NULL;
1718
1719         /* This is the IB psn used to send the request */
1720         *bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt);
1721         trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);
1722
1723         /* TID Entries for TID RDMA READ payload */
1724         req_addr = &flow->tid_entry[flow->tid_idx];
1725         req_len = sizeof(*flow->tid_entry) *
1726                         (flow->tidcnt - flow->tid_idx);
1727
1728         memset(&ohdr->u.tid_rdma.r_req, 0, sizeof(ohdr->u.tid_rdma.r_req));
1729         wpriv->ss.sge.vaddr = req_addr;
1730         wpriv->ss.sge.sge_length = req_len;
1731         wpriv->ss.sge.length = wpriv->ss.sge.sge_length;
1732         /*
1733          * We can safely zero these out. Since the first SGE covers the
1734          * entire packet, nothing else should even look at the MR.
1735          */
1736         wpriv->ss.sge.mr = NULL;
1737         wpriv->ss.sge.m = 0;
1738         wpriv->ss.sge.n = 0;
1739
1740         wpriv->ss.sg_list = NULL;
1741         wpriv->ss.total_len = wpriv->ss.sge.sge_length;
1742         wpriv->ss.num_sge = 1;
1743
1744         /* Construct the TID RDMA READ REQ packet header */
1745         rcu_read_lock();
1746         remote = rcu_dereference(qpriv->tid_rdma.remote);
1747
1748         KDETH_RESET(rreq->kdeth0, KVER, 0x1);
1749         KDETH_RESET(rreq->kdeth1, JKEY, remote->jkey);
1750         rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr +
1751                            req->cur_seg * req->seg_len + flow->sent);
1752         rreq->reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey);
1753         rreq->reth.length = cpu_to_be32(*len);
1754         rreq->tid_flow_psn =
1755                 cpu_to_be32((flow->flow_state.generation <<
1756                              HFI1_KDETH_BTH_SEQ_SHIFT) |
1757                             ((flow->flow_state.spsn + flow->pkt) &
1758                              HFI1_KDETH_BTH_SEQ_MASK));
1759         rreq->tid_flow_qp =
1760                 cpu_to_be32(qpriv->tid_rdma.local.qp |
1761                             ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
1762                              TID_RDMA_DESTQP_FLOW_SHIFT) |
1763                             qpriv->rcd->ctxt);
1764         rreq->verbs_qp = cpu_to_be32(qp->remote_qpn);
1765         *bth1 &= ~RVT_QPN_MASK;
1766         *bth1 |= remote->qp;
1767         *bth2 |= IB_BTH_REQ_ACK;
1768         rcu_read_unlock();
1769
1770         /* We are done with this segment */
1771         flow->sent += *len;
1772         req->cur_seg++;
1773         qp->s_state = TID_OP(READ_REQ);
1774         req->ack_pending++;
1775         req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1);
1776         qpriv->pending_tid_r_segs++;
1777         qp->s_num_rd_atomic++;
1778
1779         /* Set the TID RDMA READ request payload size */
1780         *len = req_len;
1781
1782         return sizeof(ohdr->u.tid_rdma.r_req) / sizeof(u32);
1783 }
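
/*
 * Sketch of the tid_flow_qp encoding above (illustrative), assuming
 * flow->idx = 5, qpriv->rcd->ctxt = 3, and that the context value fits
 * below bit 11: bits 15:11 of the dword carry the HW flow index and
 * the low bits carry the receive context,
 *
 *	tid_flow_qp = local.qp | (5 << TID_RDMA_DESTQP_FLOW_SHIFT) | 3
 *
 * which matches the decode in tid_rdma_rcv_read_request() below, where
 * flow->idx = (tid_qpn >> 11) & TID_RDMA_DESTQP_FLOW_MASK.
 */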
1784
1785 /*
1786  * @len: contains the data length to read upon entry and the read request
1787  *       payload length upon exit.
1788  */
1789 u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
1790                                  struct ib_other_headers *ohdr, u32 *bth1,
1791                                  u32 *bth2, u32 *len)
1792         __must_hold(&qp->s_lock)
1793 {
1794         struct hfi1_qp_priv *qpriv = qp->priv;
1795         struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1796         struct tid_rdma_flow *flow = NULL;
1797         u32 hdwords = 0;
1798         bool last;
1799         bool retry = true;
1800         u32 npkts = rvt_div_round_up_mtu(qp, *len);
1801
1802         trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn,
1803                                           wqe->lpsn, req);
1804         /*
1805          * Check sync conditions. Make sure that there are no pending
1806          * segments before freeing the flow.
1807          */
1808 sync_check:
1809         if (req->state == TID_REQUEST_SYNC) {
1810                 if (qpriv->pending_tid_r_segs)
1811                         goto done;
1812
1813                 hfi1_kern_clear_hw_flow(req->rcd, qp);
1814                 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
1815                 req->state = TID_REQUEST_ACTIVE;
1816         }
1817
1818         /*
1819          * If the request for this segment is resent, the tid resources should
1820          * have been allocated before. In this case, req->flow_idx should
1821          * fall behind req->setup_head.
1822          */
1823         if (req->flow_idx == req->setup_head) {
1824                 retry = false;
1825                 if (req->state == TID_REQUEST_RESEND) {
1826                         /*
1827                          * This is the first new segment for a request whose
1828                          * earlier segments have been re-sent. We need to
1829                          * set up the sge pointer correctly.
1830                          */
1831                         restart_sge(&qp->s_sge, wqe, req->s_next_psn,
1832                                     qp->pmtu);
1833                         req->isge = 0;
1834                         req->state = TID_REQUEST_ACTIVE;
1835                 }
1836
1837                 /*
1838                  * Check sync. The last PSN of each generation is reserved for
1839                  * RESYNC.
1840                  */
1841                 if ((qpriv->flow_state.psn + npkts) > MAX_TID_FLOW_PSN - 1) {
1842                         req->state = TID_REQUEST_SYNC;
1843                         goto sync_check;
1844                 }
1845
1846                 /* Allocate the flow if not yet */
1847                 if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp))
1848                         goto done;
1849
1850                 /*
1851                  * The following call will advance req->setup_head after
1852                  * allocating the tid entries.
1853                  */
1854                 if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
1855                         req->state = TID_REQUEST_QUEUED;
1856
1857                         /*
1858                          * We don't have resources for this segment. The QP has
1859                          * already been queued.
1860                          */
1861                         goto done;
1862                 }
1863         }
1864
1865         /* req->flow_idx should only be one slot behind req->setup_head */
1866         flow = &req->flows[req->flow_idx];
1867         flow->pkt = 0;
1868         flow->tid_idx = 0;
1869         flow->sent = 0;
1870         if (!retry) {
1871                 /* Set the first and last IB PSN for the flow in use. */
1872                 flow->flow_state.ib_spsn = req->s_next_psn;
1873                 flow->flow_state.ib_lpsn =
1874                         flow->flow_state.ib_spsn + flow->npkts - 1;
1875         }
1876
1877         /* Calculate the next segment start psn. */
1878         req->s_next_psn += flow->npkts;
1879
1880         /* Build the packet header */
1881         hdwords = hfi1_build_tid_rdma_read_packet(wqe, ohdr, bth1, bth2, len);
1882 done:
1883         return hdwords;
1884 }
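
/*
 * Worked example of the sync check above (illustrative), assuming
 * HFI1_KDETH_BTH_SEQ_SHIFT == 11 so that MAX_TID_FLOW_PSN == 2048:
 * with qpriv->flow_state.psn == 2040 and a segment of npkts == 10,
 * 2050 > 2047, so the request enters TID_REQUEST_SYNC, waits for all
 * pending segments, and the HW flow is cleared and re-allocated before
 * new PSNs are issued; PSN 2047, the last of the generation, stays
 * reserved for RESYNC.
 */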
1885
1886 /*
1887  * Validate and accept the TID RDMA READ request parameters.
1888  * Return 0 if the request is accepted successfully;
1889  * Return 1 otherwise.
1890  */
1891 static int tid_rdma_rcv_read_request(struct rvt_qp *qp,
1892                                      struct rvt_ack_entry *e,
1893                                      struct hfi1_packet *packet,
1894                                      struct ib_other_headers *ohdr,
1895                                      u32 bth0, u32 psn, u64 vaddr, u32 len)
1896 {
1897         struct hfi1_qp_priv *qpriv = qp->priv;
1898         struct tid_rdma_request *req;
1899         struct tid_rdma_flow *flow;
1900         u32 flow_psn, i, tidlen = 0, pktlen, tlen;
1901
1902         req = ack_to_tid_req(e);
1903
1904         /* Validate the payload first */
1905         flow = &req->flows[req->setup_head];
1906
1907         /* payload length = packet length - (header length + ICRC length) */
1908         pktlen = packet->tlen - (packet->hlen + 4);
1909         if (pktlen > sizeof(flow->tid_entry))
1910                 return 1;
1911         memcpy(flow->tid_entry, packet->ebuf, pktlen);
1912         flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
1913
1914         /*
1915          * Walk the TID_ENTRY list to make sure we have enough space for a
1916          * complete segment. Also calculate the number of required packets.
1917          */
1918         flow->npkts = rvt_div_round_up_mtu(qp, len);
1919         for (i = 0; i < flow->tidcnt; i++) {
1920                 trace_hfi1_tid_entry_rcv_read_req(qp, i,
1921                                                   flow->tid_entry[i]);
1922                 tlen = EXP_TID_GET(flow->tid_entry[i], LEN);
1923                 if (!tlen)
1924                         return 1;
1925
1926                 /*
1927                  * For a tid pair (tidctrl == 3), the buffer size of the pair
1928                  * should be the sum of the buffer size described by each
1929                  * tid entry. However, only the first entry needs to be
1930                  * specified in the request (see WFR HAS Section 8.5.7.1).
1931                  */
1932                 tidlen += tlen;
1933         }
1934         if (tidlen * PAGE_SIZE < len)
1935                 return 1;
1936
1937         /* Empty the flow array */
1938         req->clear_tail = req->setup_head;
1939         flow->pkt = 0;
1940         flow->tid_idx = 0;
1941         flow->tid_offset = 0;
1942         flow->sent = 0;
1943         flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp);
1944         flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
1945                     TID_RDMA_DESTQP_FLOW_MASK;
1946         flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_psn));
1947         flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
1948         flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
1949         flow->length = len;
1950
1951         flow->flow_state.lpsn = flow->flow_state.spsn +
1952                 flow->npkts - 1;
1953         flow->flow_state.ib_spsn = psn;
1954         flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1;
1955
1956         trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
1957         /* Set the initial flow index to the current flow. */
1958         req->flow_idx = req->setup_head;
1959
1960         /* advance circular buffer head */
1961         req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
1962
1963         /*
1964          * Compute last PSN for request.
1965          */
1966         e->opcode = (bth0 >> 24) & 0xff;
1967         e->psn = psn;
1968         e->lpsn = psn + flow->npkts - 1;
1969         e->sent = 0;
1970
1971         req->n_flows = qpriv->tid_rdma.local.max_read;
1972         req->state = TID_REQUEST_ACTIVE;
1973         req->cur_seg = 0;
1974         req->comp_seg = 0;
1975         req->ack_seg = 0;
1976         req->isge = 0;
1977         req->seg_len = qpriv->tid_rdma.local.max_len;
1978         req->total_len = len;
1979         req->total_segs = 1;
1980         req->r_flow_psn = e->psn;
1981
1982         trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn,
1983                                         req);
1984         return 0;
1985 }
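
/*
 * Numeric sketch of the validation above (illustrative, assuming 4K
 * pages): for a len = 256K segment the TID entries must describe at
 * least 256K / 4K = 64 pages in total. Eight entries with LEN = 8
 * pages each pass the tidlen * PAGE_SIZE < len check, while eight
 * entries with LEN = 4 (32 pages, 128K) would be rejected.
 */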
1986
1987 static int tid_rdma_rcv_error(struct hfi1_packet *packet,
1988                               struct ib_other_headers *ohdr,
1989                               struct rvt_qp *qp, u32 psn, int diff)
1990 {
1991         struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1992         struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd;
1993         struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
1994         struct hfi1_qp_priv *qpriv = qp->priv;
1995         struct rvt_ack_entry *e;
1996         struct tid_rdma_request *req;
1997         unsigned long flags;
1998         u8 prev;
1999         bool old_req;
2000
2001         trace_hfi1_rsp_tid_rcv_error(qp, psn);
2002         trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff);
2003         if (diff > 0) {
2004                 /* sequence error */
2005                 if (!qp->r_nak_state) {
2006                         ibp->rvp.n_rc_seqnak++;
2007                         qp->r_nak_state = IB_NAK_PSN_ERROR;
2008                         qp->r_ack_psn = qp->r_psn;
2009                         rc_defered_ack(rcd, qp);
2010                 }
2011                 goto done;
2012         }
2013
2014         ibp->rvp.n_rc_dupreq++;
2015
2016         spin_lock_irqsave(&qp->s_lock, flags);
2017         e = find_prev_entry(qp, psn, &prev, NULL, &old_req);
2018         if (!e || (e->opcode != TID_OP(READ_REQ) &&
2019                    e->opcode != TID_OP(WRITE_REQ)))
2020                 goto unlock;
2021
2022         req = ack_to_tid_req(e);
2023         req->r_flow_psn = psn;
2024         trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
2025         if (e->opcode == TID_OP(READ_REQ)) {
2026                 struct ib_reth *reth;
2027                 u32 len;
2028                 u32 rkey;
2029                 u64 vaddr;
2030                 int ok;
2031                 u32 bth0;
2032
2033                 reth = &ohdr->u.tid_rdma.r_req.reth;
2034                 /*
2035                  * The requester always restarts from the start of the original
2036                  * request.
2037                  */
2038                 len = be32_to_cpu(reth->length);
2039                 if (psn != e->psn || len != req->total_len)
2040                         goto unlock;
2041
2042                 release_rdma_sge_mr(e);
2043
2044                 rkey = be32_to_cpu(reth->rkey);
2045                 vaddr = get_ib_reth_vaddr(reth);
2046
2047                 qp->r_len = len;
2048                 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
2049                                  IB_ACCESS_REMOTE_READ);
2050                 if (unlikely(!ok))
2051                         goto unlock;
2052
2053                 /*
2054                  * If all the response packets for the current request have
2055                  * been sent out and this request is complete (old_req ==
2056                  * false), the TID flow may be unusable (req->clear_tail has
2057                  * been advanced). However, when an earlier request is
2058                  * received, this request will no longer be complete
2059                  * (qp->s_tail_ack_queue is moved back, see below).
2060                  * Consequently, we need to update the TID flow info every
2061                  * time a duplicate request is received.
2062                  */
2063                 bth0 = be32_to_cpu(ohdr->bth[0]);
2064                 if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn,
2065                                               vaddr, len))
2066                         goto unlock;
2067
2068                 /*
2069                  * True if the request is already scheduled (between
2070                  * qp->s_tail_ack_queue and qp->r_head_ack_queue).
2071                  */
2072                 if (old_req)
2073                         goto unlock;
2074         } else {
2075                 struct flow_state *fstate;
2076                 bool schedule = false;
2077                 u8 i;
2078
2079                 if (req->state == TID_REQUEST_RESEND) {
2080                         req->state = TID_REQUEST_RESEND_ACTIVE;
2081                 } else if (req->state == TID_REQUEST_INIT_RESEND) {
2082                         req->state = TID_REQUEST_INIT;
2083                         schedule = true;
2084                 }
2085
2086                 /*
2087                  * True if the request is already scheduled (between
2088                  * qp->s_tail_ack_queue and qp->r_head_ack_queue).
2089                  * Also, don't change requests which are at the SYNC
2090                  * point and haven't generated any responses yet.
2091                  * There is nothing to retransmit for them yet.
2092                  */
2093                 if (old_req || req->state == TID_REQUEST_INIT ||
2094                     (req->state == TID_REQUEST_SYNC && !req->cur_seg)) {
2095                         for (i = prev + 1; ; i++) {
2096                                 if (i > rvt_size_atomic(&dev->rdi))
2097                                         i = 0;
2098                                 if (i == qp->r_head_ack_queue)
2099                                         break;
2100                                 e = &qp->s_ack_queue[i];
2101                                 req = ack_to_tid_req(e);
2102                                 if (e->opcode == TID_OP(WRITE_REQ) &&
2103                                     req->state == TID_REQUEST_INIT)
2104                                         req->state = TID_REQUEST_INIT_RESEND;
2105                         }
2106                         /*
2107                          * If the state of the request has been changed,
2108                          * the first leg needs to get scheduled in order to
2109                          * pick up the change. Otherwise, normal response
2110                          * processing should take care of it.
2111                          */
2112                         if (!schedule)
2113                                 goto unlock;
2114                 }
2115
2116                 /*
2117                  * If there are no more allocated segments, just schedule the qp
2118                  * without changing any state.
2119                  */
2120                 if (req->clear_tail == req->setup_head)
2121                         goto schedule;
2122                 /*
2123                  * If this request has sent responses for segments which have
2124                  * not received data yet (flow_idx != clear_tail), the flow_idx
2125                  * pointer needs to be adjusted so the same responses can be
2126                  * re-sent.
2127                  */
2128                 if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) {
2129                         fstate = &req->flows[req->clear_tail].flow_state;
2130                         qpriv->pending_tid_w_segs -=
2131                                 CIRC_CNT(req->flow_idx, req->clear_tail,
2132                                          MAX_FLOWS);
2133                         req->flow_idx =
2134                                 CIRC_ADD(req->clear_tail,
2135                                          delta_psn(psn, fstate->resp_ib_psn),
2136                                          MAX_FLOWS);
2137                         qpriv->pending_tid_w_segs +=
2138                                 delta_psn(psn, fstate->resp_ib_psn);
2139                         /*
2140                          * When flow_idx == setup_head, we've gotten a duplicate
2141                          * request for a segment which has not been allocated
2142                          * yet. In that case, don't adjust this request.
2143                          * However, we still want to go through the loop below
2144                          * to adjust all subsequent requests.
2145                          */
2146                         if (CIRC_CNT(req->setup_head, req->flow_idx,
2147                                      MAX_FLOWS)) {
2148                                 req->cur_seg = delta_psn(psn, e->psn);
2149                                 req->state = TID_REQUEST_RESEND_ACTIVE;
2150                         }
2151                 }
2152
2153                 for (i = prev + 1; ; i++) {
2154                         /*
2155                          * Look at everything up to and including
2156                          * s_tail_ack_queue
2157                          */
2158                         if (i > rvt_size_atomic(&dev->rdi))
2159                                 i = 0;
2160                         if (i == qp->r_head_ack_queue)
2161                                 break;
2162                         e = &qp->s_ack_queue[i];
2163                         req = ack_to_tid_req(e);
2164                         trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn,
2165                                                    e->lpsn, req);
2166                         if (e->opcode != TID_OP(WRITE_REQ) ||
2167                             req->cur_seg == req->comp_seg ||
2168                             req->state == TID_REQUEST_INIT ||
2169                             req->state == TID_REQUEST_INIT_RESEND) {
2170                                 if (req->state == TID_REQUEST_INIT)
2171                                         req->state = TID_REQUEST_INIT_RESEND;
2172                                 continue;
2173                         }
2174                         qpriv->pending_tid_w_segs -=
2175                                 CIRC_CNT(req->flow_idx,
2176                                          req->clear_tail,
2177                                          MAX_FLOWS);
2178                         req->flow_idx = req->clear_tail;
2179                         req->state = TID_REQUEST_RESEND;
2180                         req->cur_seg = req->comp_seg;
2181                 }
2182                 qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
2183         }
2184         /* Re-process old requests. */
2185         if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
2186                 qp->s_acked_ack_queue = prev;
2187         qp->s_tail_ack_queue = prev;
2188         /*
2189          * Since the qp->s_tail_ack_queue is modified, the
2190          * qp->s_ack_state must be changed to re-initialize
2191          * qp->s_ack_rdma_sge. Otherwise, we will end up in the
2192          * wrong memory region.
2193          */
2194         qp->s_ack_state = OP(ACKNOWLEDGE);
2195 schedule:
2196         /*
2197          * It's possible to receive a retry psn that is earlier than an RNR
2198          * NAK psn. In this case, the RNR NAK state should be cleared.
2199          */
2200         if (qpriv->rnr_nak_state) {
2201                 qp->s_nak_state = 0;
2202                 qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
2203                 qp->r_psn = e->lpsn + 1;
2204                 hfi1_tid_write_alloc_resources(qp, true);
2205         }
2206
2207         qp->r_state = e->opcode;
2208         qp->r_nak_state = 0;
2209         qp->s_flags |= RVT_S_RESP_PENDING;
2210         hfi1_schedule_send(qp);
2211 unlock:
2212         spin_unlock_irqrestore(&qp->s_lock, flags);
2213 done:
2214         return 1;
2215 }
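
/*
 * Worked example of the duplicate-request rewind above (illustrative),
 * assuming MAX_FLOWS = 8: suppose clear_tail = 1, flow_idx = 4 and a
 * duplicate TID WRITE request arrives with
 * delta_psn(psn, fstate->resp_ib_psn) = 2. The three already-responded
 * segments are subtracted from pending_tid_w_segs, flow_idx moves to
 * CIRC_ADD(1, 2, 8) = 3, and two segments are added back, so the
 * responses for the segments in ring slots 3 onward can be re-sent.
 */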
2216
2217 void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
2218 {
2219         /* HANDLER FOR TID RDMA READ REQUEST packet (Responder side) */
2220
2221         /*
2222          * 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ
2223          *    (see hfi1_rc_rcv())
2224          * 2. Put TID RDMA READ REQ into the response queue (s_ack_queue)
2225          *     - Setup struct tid_rdma_req with request info
2226          *     - Initialize struct tid_rdma_flow info;
2227          *     - Copy TID entries;
2228          * 3. Set the qp->s_ack_state.
2229          * 4. Set RVT_S_RESP_PENDING in s_flags.
2230          * 5. Kick the send engine (hfi1_schedule_send())
2231          */
2232         struct hfi1_ctxtdata *rcd = packet->rcd;
2233         struct rvt_qp *qp = packet->qp;
2234         struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
2235         struct ib_other_headers *ohdr = packet->ohdr;
2236         struct rvt_ack_entry *e;
2237         unsigned long flags;
2238         struct ib_reth *reth;
2239         struct hfi1_qp_priv *qpriv = qp->priv;
2240         u32 bth0, psn, len, rkey;
2241         bool fecn;
2242         u8 next;
2243         u64 vaddr;
2244         int diff;
2245         u8 nack_state = IB_NAK_INVALID_REQUEST;
2246
2247         bth0 = be32_to_cpu(ohdr->bth[0]);
2248         if (hfi1_ruc_check_hdr(ibp, packet))
2249                 return;
2250
2251         fecn = process_ecn(qp, packet);
2252         psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
2253         trace_hfi1_rsp_rcv_tid_read_req(qp, psn);
2254
2255         if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
2256                 rvt_comm_est(qp);
2257
2258         if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2259                 goto nack_inv;
2260
2261         reth = &ohdr->u.tid_rdma.r_req.reth;
2262         vaddr = be64_to_cpu(reth->vaddr);
2263         len = be32_to_cpu(reth->length);
2264         /* The length needs to be in multiples of PAGE_SIZE */
2265         if (!len || len & ~PAGE_MASK || len > qpriv->tid_rdma.local.max_len)
2266                 goto nack_inv;
2267
2268         diff = delta_psn(psn, qp->r_psn);
2269         if (unlikely(diff)) {
2270                 tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
2271                 return;
2272         }
2273
2274         /* We've verified the request, insert it into the ack queue. */
2275         next = qp->r_head_ack_queue + 1;
2276         if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
2277                 next = 0;
2278         spin_lock_irqsave(&qp->s_lock, flags);
2279         if (unlikely(next == qp->s_tail_ack_queue)) {
2280                 if (!qp->s_ack_queue[next].sent) {
2281                         nack_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2282                         goto nack_inv_unlock;
2283                 }
2284                 update_ack_queue(qp, next);
2285         }
2286         e = &qp->s_ack_queue[qp->r_head_ack_queue];
2287         release_rdma_sge_mr(e);
2288
2289         rkey = be32_to_cpu(reth->rkey);
2290         qp->r_len = len;
2291
2292         if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
2293                                   rkey, IB_ACCESS_REMOTE_READ)))
2294                 goto nack_acc;
2295
2296         /* Accept the request parameters */
2297         if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr,
2298                                       len))
2299                 goto nack_inv_unlock;
2300
2301         qp->r_state = e->opcode;
2302         qp->r_nak_state = 0;
2303         /*
2304          * We need to increment the MSN here instead of when we
2305          * finish sending the result since a duplicate request would
2306          * increment it more than once.
2307          */
2308         qp->r_msn++;
2309         qp->r_psn += e->lpsn - e->psn + 1;
2310
2311         qp->r_head_ack_queue = next;
2312
2313         /*
2314          * For all requests other than TID WRITE which are added to the ack
2315          * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to
2316          * do this because of interlocks between these and TID WRITE
2317          * requests. The same change has also been made in hfi1_rc_rcv().
2318          */
2319         qpriv->r_tid_alloc = qp->r_head_ack_queue;
2320
2321         /* Schedule the send tasklet. */
2322         qp->s_flags |= RVT_S_RESP_PENDING;
2323         if (fecn)
2324                 qp->s_flags |= RVT_S_ECN;
2325         hfi1_schedule_send(qp);
2326
2327         spin_unlock_irqrestore(&qp->s_lock, flags);
2328         return;
2329
2330 nack_inv_unlock:
2331         spin_unlock_irqrestore(&qp->s_lock, flags);
2332 nack_inv:
2333         rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2334         qp->r_nak_state = nack_state;
2335         qp->r_ack_psn = qp->r_psn;
2336         /* Queue NAK for later */
2337         rc_defered_ack(rcd, qp);
2338         return;
2339 nack_acc:
2340         spin_unlock_irqrestore(&qp->s_lock, flags);
2341         rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
2342         qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2343         qp->r_ack_psn = qp->r_psn;
2344 }
2345
2346 u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
2347                                   struct ib_other_headers *ohdr, u32 *bth0,
2348                                   u32 *bth1, u32 *bth2, u32 *len, bool *last)
2349 {
2350         struct hfi1_ack_priv *epriv = e->priv;
2351         struct tid_rdma_request *req = &epriv->tid_req;
2352         struct hfi1_qp_priv *qpriv = qp->priv;
2353         struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
2354         u32 tidentry = flow->tid_entry[flow->tid_idx];
2355         u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
2356         struct tid_rdma_read_resp *resp = &ohdr->u.tid_rdma.r_rsp;
2357         u32 next_offset, om = KDETH_OM_LARGE;
2358         bool last_pkt;
2359         u32 hdwords = 0;
2360         struct tid_rdma_params *remote;
2361
2362         *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
2363         flow->sent += *len;
2364         next_offset = flow->tid_offset + *len;
2365         last_pkt = (flow->sent >= flow->length);
2366
2367         trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry);
2368         trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
2369
2370         rcu_read_lock();
2371         remote = rcu_dereference(qpriv->tid_rdma.remote);
2372         if (!remote) {
2373                 rcu_read_unlock();
2374                 goto done;
2375         }
2376         KDETH_RESET(resp->kdeth0, KVER, 0x1);
2377         KDETH_SET(resp->kdeth0, SH, !last_pkt);
2378         KDETH_SET(resp->kdeth0, INTR, !!(!last_pkt && remote->urg));
2379         KDETH_SET(resp->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
2380         KDETH_SET(resp->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
2381         KDETH_SET(resp->kdeth0, OM, om == KDETH_OM_LARGE);
2382         KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om);
2383         KDETH_RESET(resp->kdeth1, JKEY, remote->jkey);
2384         resp->verbs_qp = cpu_to_be32(qp->remote_qpn);
2385         rcu_read_unlock();
2386
2387         resp->aeth = rvt_compute_aeth(qp);
2388         resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn +
2389                                                flow->pkt));
2390
2391         *bth0 = TID_OP(READ_RESP) << 24;
2392         *bth1 = flow->tid_qpn;
2393         *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
2394                           HFI1_KDETH_BTH_SEQ_MASK) |
2395                          (flow->flow_state.generation <<
2396                           HFI1_KDETH_BTH_SEQ_SHIFT));
2397         *last = last_pkt;
2398         if (last_pkt)
2399                 /* Advance to next flow */
2400                 req->clear_tail = (req->clear_tail + 1) &
2401                                   (MAX_FLOWS - 1);
2402
2403         if (next_offset >= tidlen) {
2404                 flow->tid_offset = 0;
2405                 flow->tid_idx++;
2406         } else {
2407                 flow->tid_offset = next_offset;
2408         }
2409
2410         hdwords = sizeof(ohdr->u.tid_rdma.r_rsp) / sizeof(u32);
2411
2412 done:
2413         return hdwords;
2414 }
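
/*
 * Sketch of the KDETH offset encoding above (illustrative), assuming
 * KDETH_OM_LARGE == 64: a byte offset of 12288 into the TID buffer is
 * encoded as OFFSET = 12288 / 64 = 192 with the OM bit set, and the
 * receiver scales it back to bytes. Using KDETH_OM_LARGE keeps large
 * per-TID offsets representable in the limited OFFSET field.
 */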
2415
2416 static inline struct tid_rdma_request *
2417 find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
2418         __must_hold(&qp->s_lock)
2419 {
2420         struct rvt_swqe *wqe;
2421         struct tid_rdma_request *req = NULL;
2422         u32 i, end;
2423
2424         end = qp->s_cur + 1;
2425         if (end == qp->s_size)
2426                 end = 0;
2427         for (i = qp->s_acked; i != end;) {
2428                 wqe = rvt_get_swqe_ptr(qp, i);
2429                 if (cmp_psn(psn, wqe->psn) >= 0 &&
2430                     cmp_psn(psn, wqe->lpsn) <= 0) {
2431                         if (wqe->wr.opcode == opcode)
2432                                 req = wqe_to_tid_req(wqe);
2433                         break;
2434                 }
2435                 if (++i == qp->s_size)
2436                         i = 0;
2437         }
2438
2439         return req;
2440 }
2441
2442 void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
2443 {
2444         /* HANDLER FOR TID RDMA READ RESPONSE packet (Requestor side) */
2445
2446         /*
2447          * 1. Find matching SWQE
2448          * 2. Check that the entire segment has been read.
2449          * 3. Remove HFI1_S_WAIT_TID_RESP from s_flags.
2450          * 4. Free the TID flow resources.
2451          * 5. Kick the send engine (hfi1_schedule_send())
2452          */
2453         struct ib_other_headers *ohdr = packet->ohdr;
2454         struct rvt_qp *qp = packet->qp;
2455         struct hfi1_qp_priv *priv = qp->priv;
2456         struct hfi1_ctxtdata *rcd = packet->rcd;
2457         struct tid_rdma_request *req;
2458         struct tid_rdma_flow *flow;
2459         u32 opcode, aeth;
2460         bool fecn;
2461         unsigned long flags;
2462         u32 kpsn, ipsn;
2463
2464         trace_hfi1_sender_rcv_tid_read_resp(qp);
2465         fecn = process_ecn(qp, packet);
2466         kpsn = mask_psn(be32_to_cpu(ohdr->bth[2]));
2467         aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth);
2468         opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
2469
2470         spin_lock_irqsave(&qp->s_lock, flags);
2471         ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
2472         req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ);
2473         if (unlikely(!req))
2474                 goto ack_op_err;
2475
2476         flow = &req->flows[req->clear_tail];
2477         /* When header suppression is disabled */
2478         if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) {
2479                 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
2480
2481                 if (cmp_psn(kpsn, flow->flow_state.r_next_psn))
2482                         goto ack_done;
2483                 flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
2484                 /*
2485                  * Copy the payload to the destination buffer if this packet
2486                  * is delivered as an eager packet due to an RSM rule and FECN.
2487                  * The RSM rule matches the FECN bit in the BTH and the SH bit
2488                  * in the KDETH header, and therefore will not match the last
2489                  * packet of each segment, which has the SH bit cleared.
2490                  */
2491                 if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
2492                         struct rvt_sge_state ss;
2493                         u32 len;
2494                         u32 tlen = packet->tlen;
2495                         u16 hdrsize = packet->hlen;
2496                         u8 pad = packet->pad;
2497                         u8 extra_bytes = pad + packet->extra_byte +
2498                                 (SIZE_OF_CRC << 2);
2499                         u32 pmtu = qp->pmtu;
2500
2501                         if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
2502                                 goto ack_op_err;
2503                         len = restart_sge(&ss, req->e.swqe, ipsn, pmtu);
2504                         if (unlikely(len < pmtu))
2505                                 goto ack_op_err;
2506                         rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
2507                                      false);
2508                         /* Raise the sw sequence check flag for next packet */
2509                         priv->s_flags |= HFI1_R_TID_SW_PSN;
2510                 }
2511
2512                 goto ack_done;
2513         }
2514         flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
2515         req->ack_pending--;
2516         priv->pending_tid_r_segs--;
2517         qp->s_num_rd_atomic--;
2518         if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
2519             !qp->s_num_rd_atomic) {
2520                 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
2521                                  RVT_S_WAIT_ACK);
2522                 hfi1_schedule_send(qp);
2523         }
2524         if (qp->s_flags & RVT_S_WAIT_RDMAR) {
2525                 qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK);
2526                 hfi1_schedule_send(qp);
2527         }
2528
2529         trace_hfi1_ack(qp, ipsn);
2530         trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
2531                                          req->e.swqe->psn, req->e.swqe->lpsn,
2532                                          req);
2533         trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
2534
2535         /* Release the tid resources */
2536         hfi1_kern_exp_rcv_clear(req);
2537
2538         if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd))
2539                 goto ack_done;
2540
2541         /* If not done yet, build next read request */
2542         if (++req->comp_seg >= req->total_segs) {
2543                 priv->tid_r_comp++;
2544                 req->state = TID_REQUEST_COMPLETE;
2545         }
2546
2547         /*
2548          * Clear the hw flow under two conditions:
2549          * 1. This request is a sync point and it is complete;
2550          * 2. The current request is complete and there are no more requests.
2551          */
2552         if ((req->state == TID_REQUEST_SYNC &&
2553              req->comp_seg == req->cur_seg) ||
2554             priv->tid_r_comp == priv->tid_r_reqs) {
2555                 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2556                 priv->s_flags &= ~HFI1_R_TID_SW_PSN;
2557                 if (req->state == TID_REQUEST_SYNC)
2558                         req->state = TID_REQUEST_ACTIVE;
2559         }
2560
2561         hfi1_schedule_send(qp);
2562         goto ack_done;
2563
2564 ack_op_err:
2565         /*
2566          * The qp->s_last == qp->s_acked test indicates that the send engine
2567          * has finished its cleanup after sending the request and it is now
2568          * safe to put the QP into the error state. However, if the wqe queue
2569          * is empty (qp->s_acked == qp->s_tail == qp->s_head), it would be
2570          * unsafe to complete the wqe pointed to by qp->s_acked here; putting
2571          * the qp into the error state safely flushes all remaining requests.
2572          */
2573         if (qp->s_last == qp->s_acked)
2574                 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2575
2576 ack_done:
2577         spin_unlock_irqrestore(&qp->s_lock, flags);
2578 }
2579
2580 void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
2581         __must_hold(&qp->s_lock)
2582 {
2583         u32 n = qp->s_acked;
2584         struct rvt_swqe *wqe;
2585         struct tid_rdma_request *req;
2586         struct hfi1_qp_priv *priv = qp->priv;
2587
2588         lockdep_assert_held(&qp->s_lock);
2589         /* Free any TID entries */
2590         while (n != qp->s_tail) {
2591                 wqe = rvt_get_swqe_ptr(qp, n);
2592                 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2593                         req = wqe_to_tid_req(wqe);
2594                         hfi1_kern_exp_rcv_clear_all(req);
2595                 }
2596
2597                 if (++n == qp->s_size)
2598                         n = 0;
2599         }
2600         /* Free flow */
2601         hfi1_kern_clear_hw_flow(priv->rcd, qp);
2602 }
2603
2604 static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
2605                              struct hfi1_packet *packet, u8 rcv_type,
2606                              u8 opcode)
2607 {
2608         struct rvt_qp *qp = packet->qp;
2609         struct hfi1_qp_priv *qpriv = qp->priv;
2610         u32 ipsn;
2611         struct ib_other_headers *ohdr = packet->ohdr;
2612         struct rvt_ack_entry *e;
2613         struct tid_rdma_request *req;
2614         struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2615         u32 i;
2616
2617         if (rcv_type >= RHF_RCV_TYPE_IB)
2618                 goto done;
2619
2620         spin_lock(&qp->s_lock);
2621
2622         /*
2623          * We've run out of space in the eager buffer.
2624          * Eagerly received KDETH packets which require space in the
2625          * eager buffer (packets that have a payload) are TID RDMA WRITE
2626          * response packets. In this case, we have to re-transmit the
2627          * TID RDMA WRITE request.
2628          */
2629         if (rcv_type == RHF_RCV_TYPE_EAGER) {
2630                 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
2631                 hfi1_schedule_send(qp);
2632                 goto done_unlock;
2633         }
2634
2635         /*
2636          * For TID READ response, error out QP after freeing the tid
2637          * resources.
2638          */
2639         if (opcode == TID_OP(READ_RESP)) {
2640                 ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
2641                 if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
2642                     cmp_psn(ipsn, qp->s_psn) < 0) {
2643                         hfi1_kern_read_tid_flow_free(qp);
2644                         spin_unlock(&qp->s_lock);
2645                         rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2646                         goto done;
2647                 }
2648                 goto done_unlock;
2649         }
2650
2651         /*
2652          * Error out the qp for TID RDMA WRITE
2653          */
2654         hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
2655         for (i = 0; i < rvt_max_atomic(rdi); i++) {
2656                 e = &qp->s_ack_queue[i];
2657                 if (e->opcode == TID_OP(WRITE_REQ)) {
2658                         req = ack_to_tid_req(e);
2659                         hfi1_kern_exp_rcv_clear_all(req);
2660                 }
2661         }
2662         spin_unlock(&qp->s_lock);
2663         rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
2664         goto done;
2665
2666 done_unlock:
2667         spin_unlock(&qp->s_lock);
2668 done:
2669         return true;
2670 }
2671
2672 static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd,
2673                                       struct rvt_qp *qp, struct rvt_swqe *wqe)
2674 {
2675         struct tid_rdma_request *req;
2676         struct tid_rdma_flow *flow;
2677
2678         /* Start from the right segment */
2679         qp->r_flags |= RVT_R_RDMAR_SEQ;
2680         req = wqe_to_tid_req(wqe);
2681         flow = &req->flows[req->clear_tail];
2682         hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0);
2683         if (list_empty(&qp->rspwait)) {
2684                 qp->r_flags |= RVT_R_RSP_SEND;
2685                 rvt_get_qp(qp);
2686                 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2687         }
2688 }
2689
2690 /*
2691  * Handle the KDETH eflags for TID RDMA READ response.
2692  *
2693  * Return false if the last packet for a segment has been received and it
2694  * is time to process the response normally; otherwise, return true.
2695  *
2696  * The caller must hold the packet->qp->r_lock and the rcu_read_lock.
2697  */
2698 static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2699                                      struct hfi1_packet *packet, u8 rcv_type,
2700                                      u8 rte, u32 psn, u32 ibpsn)
2701         __must_hold(&packet->qp->r_lock) __must_hold(RCU)
2702 {
2703         struct hfi1_pportdata *ppd = rcd->ppd;
2704         struct hfi1_devdata *dd = ppd->dd;
2705         struct hfi1_ibport *ibp;
2706         struct rvt_swqe *wqe;
2707         struct tid_rdma_request *req;
2708         struct tid_rdma_flow *flow;
2709         u32 ack_psn;
2710         struct rvt_qp *qp = packet->qp;
2711         struct hfi1_qp_priv *priv = qp->priv;
2712         bool ret = true;
2713         int diff = 0;
2714         u32 fpsn;
2715
2716         lockdep_assert_held(&qp->r_lock);
2717         /* If the psn is out of valid range, drop the packet */
2718         if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
2719             cmp_psn(ibpsn, qp->s_psn) > 0)
2720                 return ret;
2721
2722         spin_lock(&qp->s_lock);
2723         /*
2724          * Note that NAKs implicitly ACK outstanding SEND and RDMA write
2725          * requests and implicitly NAK RDMA read and atomic requests issued
2726          * before the NAK'ed request.
2727          */
2728         ack_psn = ibpsn - 1;
2729         wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2730         ibp = to_iport(qp->ibqp.device, qp->port_num);
2731
2732         /* Complete WQEs that the PSN finishes. */
2733         while ((int)delta_psn(ack_psn, wqe->lpsn) >= 0) {
2734                 /*
2735                  * If this request is a RDMA read or atomic, and the NACK is
2736                  * for a later operation, this NACK NAKs the RDMA read or
2737                  * atomic.
2738                  */
2739                 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
2740                     wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
2741                     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2742                     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2743                         /* Retry this request. */
2744                         if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
2745                                 qp->r_flags |= RVT_R_RDMAR_SEQ;
2746                                 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2747                                         restart_tid_rdma_read_req(rcd, qp,
2748                                                                   wqe);
2749                                 } else {
2750                                         hfi1_restart_rc(qp, qp->s_last_psn + 1,
2751                                                         0);
2752                                         if (list_empty(&qp->rspwait)) {
2753                                                 qp->r_flags |= RVT_R_RSP_SEND;
2754                                                 rvt_get_qp(qp);
2755                                                 list_add_tail(/* wait */
2756                                                    &qp->rspwait,
2757                                                    &rcd->qp_wait_list);
2758                                         }
2759                                 }
2760                         }
2761                         /*
2762                          * No need to process the NAK since we are
2763                          * restarting an earlier request.
2764                          */
2765                         break;
2766                 }
2767
2768                 wqe = do_rc_completion(qp, wqe, ibp);
2769                 if (qp->s_acked == qp->s_tail)
2770                         break;
2771         }
2772
2773         /* Handle the eflags for the request */
2774         if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
2775                 goto s_unlock;
2776
2777         req = wqe_to_tid_req(wqe);
2778         switch (rcv_type) {
2779         case RHF_RCV_TYPE_EXPECTED:
2780                 switch (rte) {
2781                 case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
2782                         /*
2783                          * On the first occurrence of a Flow Sequence error,
2784                          * the flag HFI1_R_TID_SW_PSN is set.
2785                          *
2786                          * After that, the flow is *not* reprogrammed and the
2787                          * protocol falls back to SW PSN checking. This is done
2788                          * to prevent continuous Flow Sequence errors for any
2789                          * packets that could still be in the fabric.
2790                          */
2791                         flow = find_flow(req, psn, NULL);
2792                         if (!flow) {
2793                                 /*
2794                                  * We can't find the IB PSN matching the
2795                                  * received KDETH PSN. The only thing we can
2796                                  * do at this point is report the error to
2797                                  * the QP.
2798                                  */
2799                                 hfi1_kern_read_tid_flow_free(qp);
2800                                 spin_unlock(&qp->s_lock);
2801                                 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2802                                 return ret;
2803                         }
2804                         if (priv->s_flags & HFI1_R_TID_SW_PSN) {
2805                                 diff = cmp_psn(psn,
2806                                                flow->flow_state.r_next_psn);
2807                                 if (diff > 0) {
2808                                         if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
2809                                                 restart_tid_rdma_read_req(rcd,
2810                                                                           qp,
2811                                                                           wqe);
2812
2813                                         /* Drop the packet. */
2814                                         goto s_unlock;
2815                                 } else if (diff < 0) {
2816                                         /*
2817                                          * If a response packet for a restarted
2818                                          * request has come back, reset the
2819                                          * restart flag.
2820                                          */
2821                                         if (qp->r_flags & RVT_R_RDMAR_SEQ)
2822                                                 qp->r_flags &=
2823                                                         ~RVT_R_RDMAR_SEQ;
2824
2825                                         /* Drop the packet. */
2826                                         goto s_unlock;
2827                                 }
2828
2829                                 /*
2830                                  * If SW PSN verification is successful and
2831                                  * this is the last packet in the segment, tell
2832                                  * the caller to process it as a normal packet.
2833                                  */
2834                                 fpsn = full_flow_psn(flow,
2835                                                      flow->flow_state.lpsn);
2836                                 if (cmp_psn(fpsn, psn) == 0) {
2837                                         ret = false;
2838                                         if (qp->r_flags & RVT_R_RDMAR_SEQ)
2839                                                 qp->r_flags &=
2840                                                         ~RVT_R_RDMAR_SEQ;
2841                                 }
2842                                 flow->flow_state.r_next_psn =
2843                                         mask_psn(psn + 1);
2844                         } else {
2845                                 u32 last_psn;
2846
2847                                 last_psn = read_r_next_psn(dd, rcd->ctxt,
2848                                                            flow->idx);
2849                                 flow->flow_state.r_next_psn = last_psn;
2850                                 priv->s_flags |= HFI1_R_TID_SW_PSN;
2851                                 /*
2852                                  * If no request has been restarted yet,
2853                                  * restart the current one.
2854                                  */
2855                                 if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
2856                                         restart_tid_rdma_read_req(rcd, qp,
2857                                                                   wqe);
2858                         }
2859
2860                         break;
2861
2862                 case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
2863                         /*
2864                          * Since the TID flow is able to ride through
2865                          * generation mismatch, drop this stale packet.
2866                          */
2867                         break;
2868
2869                 default:
2870                         break;
2871                 }
2872                 break;
2873
2874         case RHF_RCV_TYPE_ERROR:
2875                 switch (rte) {
2876                 case RHF_RTE_ERROR_OP_CODE_ERR:
2877                 case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
2878                 case RHF_RTE_ERROR_KHDR_HCRC_ERR:
2879                 case RHF_RTE_ERROR_KHDR_KVER_ERR:
2880                 case RHF_RTE_ERROR_CONTEXT_ERR:
2881                 case RHF_RTE_ERROR_KHDR_TID_ERR:
2882                 default:
2883                         break;
2884                 }
2885         default:
2886                 break;
2887         }
2888 s_unlock:
2889         spin_unlock(&qp->s_lock);
2890         return ret;
2891 }
2892
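/*
 * A sketch of the KDETH (flow) PSN layout used by the comparisons above
 * and below, assuming HFI1_KDETH_BTH_SEQ_SHIFT == 11: the flow PSN packs
 * the generation above an 11-bit per-flow sequence number,
 *
 *	full_psn = (generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
 *		   (seq & HFI1_KDETH_BTH_SEQ_MASK);
 *
 * which is what full_flow_psn() produces when checking a received KDETH
 * PSN against a flow's expected or last PSN.
 */
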
2893 bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2894                               struct hfi1_pportdata *ppd,
2895                               struct hfi1_packet *packet)
2896 {
2897         struct hfi1_ibport *ibp = &ppd->ibport_data;
2898         struct hfi1_devdata *dd = ppd->dd;
2899         struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
2900         u8 rcv_type = rhf_rcv_type(packet->rhf);
2901         u8 rte = rhf_rcv_type_err(packet->rhf);
2902         struct ib_header *hdr = packet->hdr;
2903         struct ib_other_headers *ohdr = NULL;
2904         int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
2905         u16 lid  = be16_to_cpu(hdr->lrh[1]);
2906         u8 opcode;
2907         u32 qp_num, psn, ibpsn;
2908         struct rvt_qp *qp;
2909         struct hfi1_qp_priv *qpriv;
2910         unsigned long flags;
2911         bool ret = true;
2912         struct rvt_ack_entry *e;
2913         struct tid_rdma_request *req;
2914         struct tid_rdma_flow *flow;
2915         int diff = 0;
2916
2917         trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ",
2918                                            packet->rhf);
2919         if (packet->rhf & RHF_ICRC_ERR)
2920                 return ret;
2921
2922         packet->ohdr = &hdr->u.oth;
2923         ohdr = packet->ohdr;
2924         trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
2925
2926         /* Get the destination QP number. */
2927         qp_num = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_qp) &
2928                 RVT_QPN_MASK;
2929         if (lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
2930                 goto drop;
2931
2932         psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
2933         opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
2934
2935         rcu_read_lock();
2936         qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
2937         if (!qp)
2938                 goto rcu_unlock;
2939
2940         packet->qp = qp;
2941
2942         /* Check for valid receive state. */
2943         spin_lock_irqsave(&qp->r_lock, flags);
2944         if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2945                 ibp->rvp.n_pkt_drops++;
2946                 goto r_unlock;
2947         }
2948
2949         if (packet->rhf & RHF_TID_ERR) {
2950                 /* For TIDERR and RC QPs preemptively schedule a NAK */
2951                 u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
2952
2953                 /* Sanity check packet */
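                /* LRH (8) + BTH (12) + ICRC (4) = 24 bytes is the minimum */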
2954                 if (tlen < 24)
2955                         goto r_unlock;
2956
2957                 /*
2958                  * Check for GRH. We should never get packets with GRH in this
2959                  * path.
2960                  */
2961                 if (lnh == HFI1_LRH_GRH)
2962                         goto r_unlock;
2963
2964                 if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
2965                         goto r_unlock;
2966         }
2967
2968         /* handle TID RDMA READ */
2969         if (opcode == TID_OP(READ_RESP)) {
2970                 ibpsn = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn);
2971                 ibpsn = mask_psn(ibpsn);
2972                 ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn,
2973                                                ibpsn);
2974                 goto r_unlock;
2975         }
2976
2977         /*
2978          * qp->s_tail_ack_queue points to the rvt_ack_entry currently being
2979          * processed. These are completed sequentially so we can be sure that
2980          * the pointer will not change until the entire request has completed.
2981          */
2982         spin_lock(&qp->s_lock);
2983         qpriv = qp->priv;
2984         e = &qp->s_ack_queue[qpriv->r_tid_tail];
2985         req = ack_to_tid_req(e);
2986         flow = &req->flows[req->clear_tail];
2987         trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
2988         trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
2989         trace_hfi1_tid_write_rsp_handle_kdeth_eflags(qp);
2990         trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn,
2991                                                e->lpsn, req);
2992         trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);
2993
2994         switch (rcv_type) {
2995         case RHF_RCV_TYPE_EXPECTED:
2996                 switch (rte) {
2997                 case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
2998                         if (!(qpriv->s_flags & HFI1_R_TID_SW_PSN)) {
2999                                 qpriv->s_flags |= HFI1_R_TID_SW_PSN;
3000                                 flow->flow_state.r_next_psn =
3001                                         read_r_next_psn(dd, rcd->ctxt,
3002                                                         flow->idx);
3003                                 qpriv->r_next_psn_kdeth =
3004                                         flow->flow_state.r_next_psn;
3005                                 goto nak_psn;
3006                         } else {
3007                                 /*
3008                                  * If the received PSN does not match the next
3009                                  * expected PSN, NAK the packet.
3010                                  * However, only do that if we know that a
3011                                  * NAK has already been sent. Otherwise, this
3012                                  * mismatch could be due to packets that were
3013                                  * already in flight.
3014                                  */
3015                                 diff = cmp_psn(psn,
3016                                                flow->flow_state.r_next_psn);
3017                                 if (diff > 0)
3018                                         goto nak_psn;
3019                                 else if (diff < 0)
3020                                         break;
3021
3022                                 qpriv->s_nak_state = 0;
3023                                 /*
3024                                  * If SW PSN verification is successful and this
3025                                  * is the last packet in the segment, tell the
3026                                  * caller to process it as a normal packet.
3027                                  */
3028                                 if (psn == full_flow_psn(flow,
3029                                                          flow->flow_state.lpsn))
3030                                         ret = false;
3031                                 flow->flow_state.r_next_psn =
3032                                         mask_psn(psn + 1);
3033                                 qpriv->r_next_psn_kdeth =
3034                                         flow->flow_state.r_next_psn;
3035                         }
3036                         break;
3037
3038                 case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
3039                         goto nak_psn;
3040
3041                 default:
3042                         break;
3043                 }
3044                 break;
3045
3046         case RHF_RCV_TYPE_ERROR:
3047                 switch (rte) {
3048                 case RHF_RTE_ERROR_OP_CODE_ERR:
3049                 case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
3050                 case RHF_RTE_ERROR_KHDR_HCRC_ERR:
3051                 case RHF_RTE_ERROR_KHDR_KVER_ERR:
3052                 case RHF_RTE_ERROR_CONTEXT_ERR:
3053                 case RHF_RTE_ERROR_KHDR_TID_ERR:
3054                 default:
3055                         break;
3056                 }
3057         default:
3058                 break;
3059         }
3060
3061 unlock:
3062         spin_unlock(&qp->s_lock);
3063 r_unlock:
3064         spin_unlock_irqrestore(&qp->r_lock, flags);
3065 rcu_unlock:
3066         rcu_read_unlock();
3067 drop:
3068         return ret;
3069 nak_psn:
3070         ibp->rvp.n_rc_seqnak++;
3071         if (!qpriv->s_nak_state) {
3072                 qpriv->s_nak_state = IB_NAK_PSN_ERROR;
3073                 /* We are NAK'ing the next expected PSN */
3074                 qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
3075                 qpriv->s_flags |= RVT_S_ACK_PENDING;
3076                 if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
3077                         qpriv->r_tid_ack = qpriv->r_tid_tail;
3078                 hfi1_schedule_tid_send(qp);
3079         }
3080         goto unlock;
3081 }
3082
3083 /*
3084  * "Rewind" the TID request information.
3085  * This means that we reset the state back to ACTIVE,
3086  * find the proper flow, set the flow index to that flow,
3087  * and reset the flow information.
3088  */
3089 void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
3090                                u32 *bth2)
3091 {
3092         struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3093         struct tid_rdma_flow *flow;
3094         struct hfi1_qp_priv *qpriv = qp->priv;
3095         int diff, delta_pkts;
3096         u32 tididx = 0, i;
3097         u16 fidx;
3098
3099         if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
3100                 *bth2 = mask_psn(qp->s_psn);
3101                 flow = find_flow_ib(req, *bth2, &fidx);
3102                 if (!flow) {
3103                         trace_hfi1_msg_tid_restart_req(/* msg */
3104                            qp, "!!!!!! Could not find flow to restart: bth2 ",
3105                            (u64)*bth2);
3106                         trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode,
3107                                                        wqe->psn, wqe->lpsn,
3108                                                        req);
3109                         return;
3110                 }
3111         } else {
3112                 fidx = req->acked_tail;
3113                 flow = &req->flows[fidx];
3114                 *bth2 = mask_psn(req->r_ack_psn);
3115         }
3116
3117         if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
3118                 delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn);
3119         else
3120                 delta_pkts = delta_psn(*bth2,
3121                                        full_flow_psn(flow,
3122                                                      flow->flow_state.spsn));
3123
3124         trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3125         diff = delta_pkts + flow->resync_npkts;
3126
3127         flow->sent = 0;
3128         flow->pkt = 0;
3129         flow->tid_idx = 0;
3130         flow->tid_offset = 0;
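        /*
         * Advance through the TID entries by 'diff' packets. A worked
         * example, assuming PAGE_SIZE == 4096 and pmtu == 4096: for a
         * TID entry of LEN == 8 pages, tidlen == 32768 and tidnpkts == 8;
         * with diff == 5, npkts == 5, so flow->pkt becomes 5, flow->sent
         * and flow->tid_offset become 20480, and diff drops to 0, leaving
         * the flow positioned in the middle of that entry.
         */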
3131         if (diff) {
3132                 for (tididx = 0; tididx < flow->tidcnt; tididx++) {
3133                         u32 tidentry = flow->tid_entry[tididx], tidlen,
3134                                 tidnpkts, npkts;
3135
3136                         flow->tid_offset = 0;
3137                         tidlen = EXP_TID_GET(tidentry, LEN) * PAGE_SIZE;
3138                         tidnpkts = rvt_div_round_up_mtu(qp, tidlen);
3139                         npkts = min_t(u32, diff, tidnpkts);
3140                         flow->pkt += npkts;
3141                         flow->sent += (npkts == tidnpkts ? tidlen :
3142                                        npkts * qp->pmtu);
3143                         flow->tid_offset += npkts * qp->pmtu;
3144                         diff -= npkts;
3145                         if (!diff)
3146                                 break;
3147                 }
3148         }
3149         if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
3150                 rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) +
3151                              flow->sent, 0);
3152                 /*
3153                  * Packet PSN is based on flow_state.spsn + flow->pkt. However,
3154                  * during a RESYNC, the generation is incremented and the
3155                  * sequence is reset to 0. Since we've adjusted the npkts in the
3156                  * flow and the SGE has been sufficiently advanced, we have to
3157                  * adjust flow->pkt in order to calculate the correct PSN.
3158                  */
3159                 flow->pkt -= flow->resync_npkts;
3160         }
3161
3162         if (flow->tid_offset ==
3163             EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) {
3164                 tididx++;
3165                 flow->tid_offset = 0;
3166         }
3167         flow->tid_idx = tididx;
3168         if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
3169                 /* Move flow_idx to correct index */
3170                 req->flow_idx = fidx;
3171         else
3172                 req->clear_tail = fidx;
3173
3174         trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3175         trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn,
3176                                        wqe->lpsn, req);
3177         req->state = TID_REQUEST_ACTIVE;
3178         if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
3179                 /* Reset all the flows that we are going to resend */
3180                 fidx = CIRC_NEXT(fidx, MAX_FLOWS);
3181                 i = qpriv->s_tid_tail;
3182                 do {
3183                         for (; CIRC_CNT(req->setup_head, fidx, MAX_FLOWS);
3184                               fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
3185                                 req->flows[fidx].sent = 0;
3186                                 req->flows[fidx].pkt = 0;
3187                                 req->flows[fidx].tid_idx = 0;
3188                                 req->flows[fidx].tid_offset = 0;
3189                                 req->flows[fidx].resync_npkts = 0;
3190                         }
3191                         if (i == qpriv->s_tid_cur)
3192                                 break;
3193                         do {
3194                                 i = (++i == qp->s_size ? 0 : i);
3195                                 wqe = rvt_get_swqe_ptr(qp, i);
3196                         } while (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE);
3197                         req = wqe_to_tid_req(wqe);
3198                         req->cur_seg = req->ack_seg;
3199                         fidx = req->acked_tail;
3200                         /* Pull req->clear_tail back */
3201                         req->clear_tail = fidx;
3202                 } while (1);
3203         }
3204 }
3205
3206 void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp)
3207 {
3208         int i, ret;
3209         struct hfi1_qp_priv *qpriv = qp->priv;
3210         struct tid_flow_state *fs;
3211
3212         if (qp->ibqp.qp_type != IB_QPT_RC || !HFI1_CAP_IS_KSET(TID_RDMA))
3213                 return;
3214
3215         /*
3216          * First, clear the flow to help prevent any delayed packets from
3217          * being delivered.
3218          */
3219         fs = &qpriv->flow_state;
3220         if (fs->index != RXE_NUM_TID_FLOWS)
3221                 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
3222
3223         for (i = qp->s_acked; i != qp->s_head;) {
3224                 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
3225
3226                 if (++i == qp->s_size)
3227                         i = 0;
3228                 /* Free only locally allocated TID entries */
3229                 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
3230                         continue;
3231                 do {
3232                         struct hfi1_swqe_priv *priv = wqe->priv;
3233
3234                         ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
3235                 } while (!ret);
3236         }
3237         for (i = qp->s_acked_ack_queue; i != qp->r_head_ack_queue;) {
3238                 struct rvt_ack_entry *e = &qp->s_ack_queue[i];
3239
3240                 if (++i == rvt_max_atomic(ib_to_rvt(qp->ibqp.device)))
3241                         i = 0;
3242                 /* Free only locally allocated TID entries */
3243                 if (e->opcode != TID_OP(WRITE_REQ))
3244                         continue;
3245                 do {
3246                         struct hfi1_ack_priv *priv = e->priv;
3247
3248                         ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
3249                 } while (!ret);
3250         }
3251 }
3252
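/*
 * Decide whether @wqe must wait for the previous request to make
 * progress before it can be sent. Roughly: a TID RDMA WRITE must have
 * all of its segments ACK'ed before a subsequent request may start, and
 * a TID RDMA READ must not pass an outstanding legacy RDMA READ.
 * Returns true (and sets HFI1_S_TID_WAIT_INTERLCK) when @wqe must wait.
 */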
3253 bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
3254 {
3255         struct rvt_swqe *prev;
3256         struct hfi1_qp_priv *priv = qp->priv;
3257         u32 s_prev;
3258         struct tid_rdma_request *req;
3259
3260         s_prev = (qp->s_cur == 0 ? qp->s_size : qp->s_cur) - 1;
3261         prev = rvt_get_swqe_ptr(qp, s_prev);
3262
3263         switch (wqe->wr.opcode) {
3264         case IB_WR_SEND:
3265         case IB_WR_SEND_WITH_IMM:
3266         case IB_WR_SEND_WITH_INV:
3267         case IB_WR_ATOMIC_CMP_AND_SWP:
3268         case IB_WR_ATOMIC_FETCH_AND_ADD:
3269         case IB_WR_RDMA_WRITE:
3270                 switch (prev->wr.opcode) {
3271                 case IB_WR_TID_RDMA_WRITE:
3272                         req = wqe_to_tid_req(prev);
3273                         if (req->ack_seg != req->total_segs)
3274                                 goto interlock;
3275                 default:
3276                         break;
3277                 }
3278                 break;
3279         case IB_WR_RDMA_READ:
3280                 if (prev->wr.opcode != IB_WR_TID_RDMA_WRITE)
3281                         break;
3282                 /* fall through */
3283         case IB_WR_TID_RDMA_READ:
3284                 switch (prev->wr.opcode) {
3285                 case IB_WR_RDMA_READ:
3286                         if (qp->s_acked != qp->s_cur)
3287                                 goto interlock;
3288                         break;
3289                 case IB_WR_TID_RDMA_WRITE:
3290                         req = wqe_to_tid_req(prev);
3291                         if (req->ack_seg != req->total_segs)
3292                                 goto interlock;
3293                 default:
3294                         break;
3295                 }
3296         default:
3297                 break;
3298         }
3299         return false;
3300
3301 interlock:
3302         priv->s_flags |= HFI1_S_TID_WAIT_INTERLCK;
3303         return true;
3304 }
3305
3306 /* Does @sge meet the alignment requirements for tid rdma? */
3307 static inline bool hfi1_check_sge_align(struct rvt_qp *qp,
3308                                         struct rvt_sge *sge, int num_sge)
3309 {
3310         int i;
3311
3312         for (i = 0; i < num_sge; i++, sge++) {
3313                 trace_hfi1_sge_check_align(qp, i, sge);
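                /* The address must be page aligned and the length a page multiple */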
3314                 if ((u64)sge->vaddr & ~PAGE_MASK ||
3315                     sge->sge_length & ~PAGE_MASK)
3316                         return false;
3317         }
3318         return true;
3319 }
3320
3321 void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
3322 {
3323         struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
3324         struct hfi1_swqe_priv *priv = wqe->priv;
3325         struct tid_rdma_params *remote;
3326         enum ib_wr_opcode new_opcode;
3327         bool do_tid_rdma = false;
3328         struct hfi1_pportdata *ppd = qpriv->rcd->ppd;
3329
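        /*
         * Don't use TID RDMA for loopback: if the destination LID, with
         * its LMC bits masked off, equals our own port LID, the packet
         * never leaves the node.
         */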
3330         if ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ppd->lmc) - 1)) ==
3331                                 ppd->lid)
3332                 return;
3333         if (qpriv->hdr_type != HFI1_PKT_TYPE_9B)
3334                 return;
3335
3336         rcu_read_lock();
3337         remote = rcu_dereference(qpriv->tid_rdma.remote);
3338         /*
3339          * If TID RDMA is disabled by the negotiation, don't
3340          * use it.
3341          */
3342         if (!remote)
3343                 goto exit;
3344
3345         if (wqe->wr.opcode == IB_WR_RDMA_READ) {
3346                 if (hfi1_check_sge_align(qp, &wqe->sg_list[0],
3347                                          wqe->wr.num_sge)) {
3348                         new_opcode = IB_WR_TID_RDMA_READ;
3349                         do_tid_rdma = true;
3350                 }
3351         } else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
3352                 /*
3353                  * TID RDMA is enabled for this RDMA WRITE request iff:
3354                  *   1. The remote address is page-aligned,
3355                  *   2. The length is larger than the minimum segment size,
3356                  *   3. The length is page-multiple.
3357                  */
3358                 if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) &&
3359                     !(wqe->length & ~PAGE_MASK)) {
3360                         new_opcode = IB_WR_TID_RDMA_WRITE;
3361                         do_tid_rdma = true;
3362                 }
3363         }
3364
3365         if (do_tid_rdma) {
3366                 if (hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, GFP_ATOMIC))
3367                         goto exit;
3368                 wqe->wr.opcode = new_opcode;
3369                 priv->tid_req.seg_len =
3370                         min_t(u32, remote->max_len, wqe->length);
3371                 priv->tid_req.total_segs =
3372                         DIV_ROUND_UP(wqe->length, priv->tid_req.seg_len);
3373                 /* Compute the last PSN of the request */
3374                 wqe->lpsn = wqe->psn;
3375                 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
3376                         priv->tid_req.n_flows = remote->max_read;
3377                         qpriv->tid_r_reqs++;
3378                         wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
3379                 } else {
3380                         wqe->lpsn += priv->tid_req.total_segs - 1;
3381                         atomic_inc(&qpriv->n_requests);
3382                 }
3383
3384                 priv->tid_req.cur_seg = 0;
3385                 priv->tid_req.comp_seg = 0;
3386                 priv->tid_req.ack_seg = 0;
3387                 priv->tid_req.state = TID_REQUEST_INACTIVE;
3388                 /*
3389                  * Reset acked_tail.
3390                  * TID RDMA READ does not have ACKs so it does not
3391                  * update the pointer. We have to reset it so TID RDMA
3392                  * WRITE does not get confused.
3393                  */
3394                 priv->tid_req.acked_tail = priv->tid_req.setup_head;
3395                 trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
3396                                                  wqe->psn, wqe->lpsn,
3397                                                  &priv->tid_req);
3398         }
3399 exit:
3400         rcu_read_unlock();
3401 }
3402
3403 /* TID RDMA WRITE functions */
3404
3405 u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
3406                                   struct ib_other_headers *ohdr,
3407                                   u32 *bth1, u32 *bth2, u32 *len)
3408 {
3409         struct hfi1_qp_priv *qpriv = qp->priv;
3410         struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3411         struct tid_rdma_params *remote;
3412
3413         rcu_read_lock();
3414         remote = rcu_dereference(qpriv->tid_rdma.remote);
3415         /*
3416          * Set the number of flow to be used based on negotiated
3417          * parameters.
3418          */
3419         req->n_flows = remote->max_write;
3420         req->state = TID_REQUEST_ACTIVE;
3421
3422         KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth0, KVER, 0x1);
3423         KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth1, JKEY, remote->jkey);
3424         ohdr->u.tid_rdma.w_req.reth.vaddr =
3425                 cpu_to_be64(wqe->rdma_wr.remote_addr + (wqe->length - *len));
3426         ohdr->u.tid_rdma.w_req.reth.rkey =
3427                 cpu_to_be32(wqe->rdma_wr.rkey);
3428         ohdr->u.tid_rdma.w_req.reth.length = cpu_to_be32(*len);
3429         ohdr->u.tid_rdma.w_req.verbs_qp = cpu_to_be32(qp->remote_qpn);
3430         *bth1 &= ~RVT_QPN_MASK;
3431         *bth1 |= remote->qp;
3432         qp->s_state = TID_OP(WRITE_REQ);
3433         qp->s_flags |= HFI1_S_WAIT_TID_RESP;
3434         *bth2 |= IB_BTH_REQ_ACK;
3435         *len = 0;
3436
3437         rcu_read_unlock();
3438         return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
3439 }
3440
3441 void hfi1_compute_tid_rdma_flow_wt(void)
3442 {
3443         /*
3444          * Heuristic for computing the RNR timeout when waiting on the flow
3445          * queue. Rather than a computationaly expensive exact estimate of when
3446          * a flow will be available, we assume that if a QP is at position N in
3447          * the flow queue it has to wait approximately (N + 1) * (number of
3448          * segments between two sync points), assuming PMTU of 4K. The rationale
3449          * for this is that flows are released and recycled at each sync point.
3450          */
3451         tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
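        /*
         * A worked example, assuming an 11-bit KDETH sequence space
         * (MAX_TID_FLOW_PSN == 2048) and a 256KB maximum segment size:
         * 2048 * 4096 / 262144 == 32 segments of wait per position in
         * the flow queue.
         */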
3452                 TID_RDMA_MAX_SEGMENT_SIZE;
3453 }
3454
3455 static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
3456                              struct tid_queue *queue)
3457 {
3458         return qpriv->tid_enqueue - queue->dequeue;
3459 }
3460
3461 /*
3462  * @qp: points to rvt_qp context.
3463  * @to_seg: desired RNR timeout in segments.
3464  * Return: index of the next highest timeout in the ib_hfi1_rnr_table[]
3465  */
3466 static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg)
3467 {
3468         struct hfi1_qp_priv *qpriv = qp->priv;
3469         u64 timeout;
3470         u32 bytes_per_us;
3471         u8 i;
3472
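        /*
         * Assuming active_egress_rate() reports the rate in Mbit/s,
         * dividing by 8 gives MB/s, which is numerically the same as
         * bytes per microsecond.
         */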
3473         bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8;
3474         timeout = (to_seg * TID_RDMA_MAX_SEGMENT_SIZE) / bytes_per_us;
3475         /*
3476          * Find the next highest value in the RNR table to the required
3477          * timeout. This gives the responder some padding.
3478          */
3479         for (i = 1; i <= IB_AETH_CREDIT_MASK; i++)
3480                 if (rvt_rnr_tbl_to_usec(i) >= timeout)
3481                         return i;
3482         return 0;
3483 }
3484
3485 /**
3486  * Central place for resource allocation at the TID write responder;
3487  * it is called from the write_req and write_data interrupt handlers as
3488  * well as from the send thread when a queued QP is scheduled for
3489  * resource allocation.
3490  *
3491  * It iterates over (a) segments of a request and then (b) queued requests
3492  * themselves to allocate resources for up to local->max_write
3493  * segments across multiple requests. Allocation stops when a sync point
3494  * is hit and resumes after the data packets at the sync point have
3495  * been received.
3496  *
3497  * Resource allocation and the sending of responses are decoupled. The
3498  * requests/segments being allocated and being sent are tracked as follows.
3499  * Resources are allocated for:
3500  *     [request: qpriv->r_tid_alloc, segment: req->alloc_seg]
3501  * The send thread sends:
3502  *     [request: qp->s_tail_ack_queue, segment:req->cur_seg]
3503  */
3504 static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
3505 {
3506         struct tid_rdma_request *req;
3507         struct hfi1_qp_priv *qpriv = qp->priv;
3508         struct hfi1_ctxtdata *rcd = qpriv->rcd;
3509         struct tid_rdma_params *local = &qpriv->tid_rdma.local;
3510         struct rvt_ack_entry *e;
3511         u32 npkts, to_seg;
3512         bool last;
3513         int ret = 0;
3514
3515         lockdep_assert_held(&qp->s_lock);
3516
3517         while (1) {
3518                 trace_hfi1_rsp_tid_write_alloc_res(qp, 0);
3519                 trace_hfi1_tid_write_rsp_alloc_res(qp);
3520                 /*
3521                  * Don't allocate more segments if a RNR NAK has already been
3522                  * scheduled to avoid messing up qp->r_psn: the RNR NAK will
3523                  * be sent only when all allocated segments have been sent.
3524                  * However, if more segments are allocated before that, TID RDMA
3525                  * WRITE RESP packets will be sent out for these new segments
3526                  * before the RNR NAK packet. When the requester receives the
3527                  * RNR NAK packet, it will restart with qp->s_last_psn + 1,
3528                  * which does not match qp->r_psn and will be dropped.
3529                  * Consequently, the requester will exhaust its retries and
3530                  * put the qp into error state.
3531                  */
3532                 if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND)
3533                         break;
3534
3535                 /* No requests left to process */
3536                 if (qpriv->r_tid_alloc == qpriv->r_tid_head) {
3537                         /* If all data has been received, clear the flow */
3538                         if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS &&
3539                             !qpriv->alloc_w_segs) {
3540                                 hfi1_kern_clear_hw_flow(rcd, qp);
3541                                 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
3542                         }
3543                         break;
3544                 }
3545
3546                 e = &qp->s_ack_queue[qpriv->r_tid_alloc];
3547                 if (e->opcode != TID_OP(WRITE_REQ))
3548                         goto next_req;
3549                 req = ack_to_tid_req(e);
3550                 trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn,
3551                                                    e->lpsn, req);
3552                 /* Finished allocating for all segments of this request */
3553                 if (req->alloc_seg >= req->total_segs)
3554                         goto next_req;
3555
3556                 /* Can allocate only a maximum of local->max_write for a QP */
3557                 if (qpriv->alloc_w_segs >= local->max_write)
3558                         break;
3559
3560                 /* Don't allocate at a sync point with data packets pending */
3561                 if (qpriv->sync_pt && qpriv->alloc_w_segs)
3562                         break;
3563
3564                 /* All data received at the sync point, continue */
3565                 if (qpriv->sync_pt && !qpriv->alloc_w_segs) {
3566                         hfi1_kern_clear_hw_flow(rcd, qp);
3567                         qpriv->sync_pt = false;
3568                         qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
3569                 }
3570
3571                 /* Allocate flow if we don't have one */
3572                 if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
3573                         ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
3574                         if (ret) {
3575                                 to_seg = tid_rdma_flow_wt *
3576                                         position_in_queue(qpriv,
3577                                                           &rcd->flow_queue);
3578                                 break;
3579                         }
3580                 }
3581
3582                 npkts = rvt_div_round_up_mtu(qp, req->seg_len);
3583
3584                 /*
3585                  * We are at a sync point if we run out of KDETH PSN space.
3586                  * Last PSN of every generation is reserved for RESYNC.
3587                  */
3588                 if (qpriv->flow_state.psn + npkts > MAX_TID_FLOW_PSN - 1) {
3589                         qpriv->sync_pt = true;
3590                         break;
3591                 }
3592
3593                 /*
3594                  * If overtaking req->acked_tail, send an RNR NAK. Because the
3595                  * QP is not queued in this case, and the issue can only be
3596                  * caused by a delay in scheduling the second leg, which we
3597                  * cannot estimate, we use a rather arbitrary RNR timeout of
3598                  * (MAX_FLOWS / 2) segments.
3599                  */
3600                 if (!CIRC_SPACE(req->setup_head, req->acked_tail,
3601                                 MAX_FLOWS)) {
3602                         ret = -EAGAIN;
3603                         to_seg = MAX_FLOWS >> 1;
3604                         qpriv->s_flags |= RVT_S_ACK_PENDING;
3605                         hfi1_schedule_tid_send(qp);
3606                         break;
3607                 }
3608
3609                 /* Try to allocate rcv array / TID entries */
3610                 ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last);
3611                 if (ret == -EAGAIN)
3612                         to_seg = position_in_queue(qpriv, &rcd->rarr_queue);
3613                 if (ret)
3614                         break;
3615
3616                 qpriv->alloc_w_segs++;
3617                 req->alloc_seg++;
3618                 continue;
3619 next_req:
3620                 /* Begin processing the next request */
3621                 if (++qpriv->r_tid_alloc >
3622                     rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3623                         qpriv->r_tid_alloc = 0;
3624         }
3625
3626         /*
3627          * Schedule an RNR NAK to be sent if (a) flow or rcv array allocation
3628          * has failed (b) we are called from the rcv handler interrupt context
3629          * (c) an RNR NAK has not already been scheduled
3630          */
3631         if (ret == -EAGAIN && intr_ctx && !qp->r_nak_state)
3632                 goto send_rnr_nak;
3633
3634         return;
3635
3636 send_rnr_nak:
3637         lockdep_assert_held(&qp->r_lock);
3638
3639         /* Set r_nak_state to prevent unrelated events from generating NAK's */
3640         qp->r_nak_state = hfi1_compute_tid_rnr_timeout(qp, to_seg) | IB_RNR_NAK;
3641
3642         /* Pull back r_psn to the segment being RNR NAK'd */
3643         qp->r_psn = e->psn + req->alloc_seg;
3644         qp->r_ack_psn = qp->r_psn;
3645         /*
3646          * Pull back r_head_ack_queue to the ack entry following the request
3647          * being RNR NAK'd. This allows resources to be allocated to the request
3648          * if the queued QP is scheduled.
3649          */
3650         qp->r_head_ack_queue = qpriv->r_tid_alloc + 1;
3651         if (qp->r_head_ack_queue > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3652                 qp->r_head_ack_queue = 0;
3653         qpriv->r_tid_head = qp->r_head_ack_queue;
3654         /*
3655          * These send side fields are used in make_rc_ack(). They are set in
3656          * hfi1_send_rc_ack() but must be set here before dropping qp->s_lock
3657          * for consistency
3658          */
3659         qp->s_nak_state = qp->r_nak_state;
3660         qp->s_ack_psn = qp->r_ack_psn;
3661         /*
3662          * Clear the ACK PENDING flag to prevent unwanted ACK because we
3663          * have modified qp->s_ack_psn here.
3664          */
3665         qp->s_flags &= ~(RVT_S_ACK_PENDING);
3666
3667         trace_hfi1_rsp_tid_write_alloc_res(qp, qp->r_psn);
3668         /*
3669          * qpriv->rnr_nak_state is used to determine when the scheduled RNR NAK
3670          * has actually been sent. qp->s_flags RVT_S_ACK_PENDING bit cannot be
3671          * used for this because qp->s_lock is dropped before calling
3672          * hfi1_send_rc_ack() leading to inconsistency between the receive
3673          * interrupt handlers and the send thread in make_rc_ack()
3674          */
3675         qpriv->rnr_nak_state = TID_RNR_NAK_SEND;
3676
3677         /*
3678          * Schedule RNR NAK to be sent. RNR NAK's are scheduled from the receive
3679          * interrupt handlers but will be sent from the send engine behind any
3680          * previous responses that may have been scheduled
3681          */
3682         rc_defered_ack(rcd, qp);
3683 }
3684
3685 void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
3686 {
3687         /* HANDLER FOR TID RDMA WRITE REQUEST packet (Responder side)*/
3688
3689         /*
3690          * 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST
3691          *    (see hfi1_rc_rcv())
3692          *     - Don't allow 0-length requests.
3693          * 2. Put TID RDMA WRITE REQ into the response queue (s_ack_queue)
3694          *     - Set up struct tid_rdma_request with the request info
3695          *     - Prepare struct tid_rdma_flow array?
3696          * 3. Set qp->s_ack_state as per the state diagram in the design doc.
3697          * 4. Set RVT_S_RESP_PENDING in s_flags.
3698          * 5. Kick the send engine (hfi1_schedule_send())
3699          */
3700         struct hfi1_ctxtdata *rcd = packet->rcd;
3701         struct rvt_qp *qp = packet->qp;
3702         struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
3703         struct ib_other_headers *ohdr = packet->ohdr;
3704         struct rvt_ack_entry *e;
3705         unsigned long flags;
3706         struct ib_reth *reth;
3707         struct hfi1_qp_priv *qpriv = qp->priv;
3708         struct tid_rdma_request *req;
3709         u32 bth0, psn, len, rkey, num_segs;
3710         bool fecn;
3711         u8 next;
3712         u64 vaddr;
3713         int diff;
3714
3715         bth0 = be32_to_cpu(ohdr->bth[0]);
3716         if (hfi1_ruc_check_hdr(ibp, packet))
3717                 return;
3718
3719         fecn = process_ecn(qp, packet);
3720         psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
3721         trace_hfi1_rsp_rcv_tid_write_req(qp, psn);
3722
3723         if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
3724                 rvt_comm_est(qp);
3725
3726         if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3727                 goto nack_inv;
3728
3729         reth = &ohdr->u.tid_rdma.w_req.reth;
3730         vaddr = be64_to_cpu(reth->vaddr);
3731         len = be32_to_cpu(reth->length);
3732
3733         num_segs = DIV_ROUND_UP(len, qpriv->tid_rdma.local.max_len);
3734         diff = delta_psn(psn, qp->r_psn);
3735         if (unlikely(diff)) {
3736                 tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
3737                 return;
3738         }
3739
3740         /*
3741          * The resent request which was previously RNR NAK'd is inserted at the
3742          * location of the original request, which is one entry behind
3743          * r_head_ack_queue
3744          */
3745         if (qpriv->rnr_nak_state)
3746                 qp->r_head_ack_queue = qp->r_head_ack_queue ?
3747                         qp->r_head_ack_queue - 1 :
3748                         rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
3749
3750         /* We've verified the request, insert it into the ack queue. */
3751         next = qp->r_head_ack_queue + 1;
3752         if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3753                 next = 0;
3754         spin_lock_irqsave(&qp->s_lock, flags);
3755         if (unlikely(next == qp->s_acked_ack_queue)) {
3756                 if (!qp->s_ack_queue[next].sent)
3757                         goto nack_inv_unlock;
3758                 update_ack_queue(qp, next);
3759         }
3760         e = &qp->s_ack_queue[qp->r_head_ack_queue];
3761         req = ack_to_tid_req(e);
3762
3763         /* Bring previously RNR NAK'd request back to life */
3764         if (qpriv->rnr_nak_state) {
3765                 qp->r_nak_state = 0;
3766                 qp->s_nak_state = 0;
3767                 qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
3768                 qp->r_psn = e->lpsn + 1;
3769                 req->state = TID_REQUEST_INIT;
3770                 goto update_head;
3771         }
3772
3773         release_rdma_sge_mr(e);
3774
3775         /* The length needs to be in multiples of PAGE_SIZE */
3776         if (!len || len & ~PAGE_MASK)
3777                 goto nack_inv_unlock;
3778
3779         rkey = be32_to_cpu(reth->rkey);
3780         qp->r_len = len;
3781
3782         if (e->opcode == TID_OP(WRITE_REQ) &&
3783             (req->setup_head != req->clear_tail ||
3784              req->clear_tail != req->acked_tail))
3785                 goto nack_inv_unlock;
3786
3787         if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
3788                                   rkey, IB_ACCESS_REMOTE_WRITE)))
3789                 goto nack_acc;
3790
3791         qp->r_psn += num_segs - 1;
3792
3793         e->opcode = (bth0 >> 24) & 0xff;
3794         e->psn = psn;
3795         e->lpsn = qp->r_psn;
3796         e->sent = 0;
3797
3798         req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write);
3799         req->state = TID_REQUEST_INIT;
3800         req->cur_seg = 0;
3801         req->comp_seg = 0;
3802         req->ack_seg = 0;
3803         req->alloc_seg = 0;
3804         req->isge = 0;
3805         req->seg_len = qpriv->tid_rdma.local.max_len;
3806         req->total_len = len;
3807         req->total_segs = num_segs;
3808         req->r_flow_psn = e->psn;
3809         req->ss.sge = e->rdma_sge;
3810         req->ss.num_sge = 1;
3811
3812         req->flow_idx = req->setup_head;
3813         req->clear_tail = req->setup_head;
3814         req->acked_tail = req->setup_head;
3815
3816         qp->r_state = e->opcode;
3817         qp->r_nak_state = 0;
3818         /*
3819          * We need to increment the MSN here instead of when we
3820          * finish sending the result since a duplicate request would
3821          * increment it more than once.
3822          */
3823         qp->r_msn++;
3824         qp->r_psn++;
3825
3826         trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn,
3827                                          req);
3828
3829         if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID) {
3830                 qpriv->r_tid_tail = qp->r_head_ack_queue;
3831         } else if (qpriv->r_tid_tail == qpriv->r_tid_head) {
3832                 struct tid_rdma_request *ptr;
3833
3834                 e = &qp->s_ack_queue[qpriv->r_tid_tail];
3835                 ptr = ack_to_tid_req(e);
3836
3837                 if (e->opcode != TID_OP(WRITE_REQ) ||
3838                     ptr->comp_seg == ptr->total_segs) {
3839                         if (qpriv->r_tid_tail == qpriv->r_tid_ack)
3840                                 qpriv->r_tid_ack = qp->r_head_ack_queue;
3841                         qpriv->r_tid_tail = qp->r_head_ack_queue;
3842                 }
3843         }
3844 update_head:
3845         qp->r_head_ack_queue = next;
3846         qpriv->r_tid_head = qp->r_head_ack_queue;
3847
3848         hfi1_tid_write_alloc_resources(qp, true);
3849         trace_hfi1_tid_write_rsp_rcv_req(qp);
3850
3851         /* Schedule the send tasklet. */
3852         qp->s_flags |= RVT_S_RESP_PENDING;
3853         if (fecn)
3854                 qp->s_flags |= RVT_S_ECN;
3855         hfi1_schedule_send(qp);
3856
3857         spin_unlock_irqrestore(&qp->s_lock, flags);
3858         return;
3859
3860 nack_inv_unlock:
3861         spin_unlock_irqrestore(&qp->s_lock, flags);
3862 nack_inv:
3863         rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3864         qp->r_nak_state = IB_NAK_INVALID_REQUEST;
3865         qp->r_ack_psn = qp->r_psn;
3866         /* Queue NAK for later */
3867         rc_defered_ack(rcd, qp);
3868         return;
3869 nack_acc:
3870         spin_unlock_irqrestore(&qp->s_lock, flags);
3871         rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
3872         qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
3873         qp->r_ack_psn = qp->r_psn;
3874 }
3875
3876 u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
3877                                    struct ib_other_headers *ohdr, u32 *bth1,
3878                                    u32 bth2, u32 *len,
3879                                    struct rvt_sge_state **ss)
3880 {
3881         struct hfi1_ack_priv *epriv = e->priv;
3882         struct tid_rdma_request *req = &epriv->tid_req;
3883         struct hfi1_qp_priv *qpriv = qp->priv;
3884         struct tid_rdma_flow *flow = NULL;
3885         u32 resp_len = 0, hdwords = 0;
3886         void *resp_addr = NULL;
3887         struct tid_rdma_params *remote;
3888
3889         trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn,
3890                                             req);
3891         trace_hfi1_tid_write_rsp_build_resp(qp);
3892         trace_hfi1_rsp_build_tid_write_resp(qp, bth2);
3893         flow = &req->flows[req->flow_idx];
3894         switch (req->state) {
3895         default:
3896                 /*
3897                  * Try to allocate resources here in case QP was queued and was
3898                  * later scheduled when resources became available
3899                  */
3900                 hfi1_tid_write_alloc_resources(qp, false);
3901
3902                 /* We've already sent everything which is ready */
3903                 if (req->cur_seg >= req->alloc_seg)
3904                         goto done;
3905
3906                 /*
3907                  * Resources can be assigned but responses cannot be sent in
3908                  * the rnr_nak state until the resent request is received.
3909                  */
3910                 if (qpriv->rnr_nak_state == TID_RNR_NAK_SENT)
3911                         goto done;
3912
3913                 req->state = TID_REQUEST_ACTIVE;
3914                 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3915                 req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
3916                 hfi1_add_tid_reap_timer(qp);
3917                 break;
3918
3919         case TID_REQUEST_RESEND_ACTIVE:
3920         case TID_REQUEST_RESEND:
3921                 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3922                 req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
3923                 if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS))
3924                         req->state = TID_REQUEST_ACTIVE;
3925
3926                 hfi1_mod_tid_reap_timer(qp);
3927                 break;
3928         }
3929         flow->flow_state.resp_ib_psn = bth2;
3930         resp_addr = (void *)flow->tid_entry;
3931         resp_len = sizeof(*flow->tid_entry) * flow->tidcnt;
3932         req->cur_seg++;
3933
3934         memset(&ohdr->u.tid_rdma.w_rsp, 0, sizeof(ohdr->u.tid_rdma.w_rsp));
3935         epriv->ss.sge.vaddr = resp_addr;
3936         epriv->ss.sge.sge_length = resp_len;
3937         epriv->ss.sge.length = epriv->ss.sge.sge_length;
3938         /*
3939          * We can safely zero these out. Since the first SGE covers the
3940          * entire packet, nothing else should even look at the MR.
3941          */
3942         epriv->ss.sge.mr = NULL;
3943         epriv->ss.sge.m = 0;
3944         epriv->ss.sge.n = 0;
3945
3946         epriv->ss.sg_list = NULL;
3947         epriv->ss.total_len = epriv->ss.sge.sge_length;
3948         epriv->ss.num_sge = 1;
3949
3950         *ss = &epriv->ss;
3951         *len = epriv->ss.total_len;
3952
3953         /* Construct the TID RDMA WRITE RESP packet header */
3954         rcu_read_lock();
3955         remote = rcu_dereference(qpriv->tid_rdma.remote);
3956
3957         KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth0, KVER, 0x1);
3958         KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth1, JKEY, remote->jkey);
3959         ohdr->u.tid_rdma.w_rsp.aeth = rvt_compute_aeth(qp);
3960         ohdr->u.tid_rdma.w_rsp.tid_flow_psn =
3961                 cpu_to_be32((flow->flow_state.generation <<
3962                              HFI1_KDETH_BTH_SEQ_SHIFT) |
3963                             (flow->flow_state.spsn &
3964                              HFI1_KDETH_BTH_SEQ_MASK));
3965         ohdr->u.tid_rdma.w_rsp.tid_flow_qp =
3966                 cpu_to_be32(qpriv->tid_rdma.local.qp |
3967                             ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
3968                              TID_RDMA_DESTQP_FLOW_SHIFT) |
3969                             qpriv->rcd->ctxt);
3970         ohdr->u.tid_rdma.w_rsp.verbs_qp = cpu_to_be32(qp->remote_qpn);
3971         *bth1 = remote->qp;
3972         rcu_read_unlock();
3973         hdwords = sizeof(ohdr->u.tid_rdma.w_rsp) / sizeof(u32);
3974         qpriv->pending_tid_w_segs++;
3975 done:
3976         return hdwords;
3977 }
3978
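/*
 * TID "reap" timer: armed on the responder while hardware flow and
 * RcvArray resources are outstanding for a TID RDMA WRITE request. If it
 * expires (see hfi1_tid_timeout() below), the resources are reclaimed
 * and the QP is moved to the error state.
 */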
3979 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp)
3980 {
3981         struct hfi1_qp_priv *qpriv = qp->priv;
3982
3983         lockdep_assert_held(&qp->s_lock);
3984         if (!(qpriv->s_flags & HFI1_R_TID_RSC_TIMER)) {
3985                 qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
3986                 qpriv->s_tid_timer.expires = jiffies +
3987                         qpriv->tid_timer_timeout_jiffies;
3988                 add_timer(&qpriv->s_tid_timer);
3989         }
3990 }
3991
3992 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp)
3993 {
3994         struct hfi1_qp_priv *qpriv = qp->priv;
3995
3996         lockdep_assert_held(&qp->s_lock);
3997         qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
3998         mod_timer(&qpriv->s_tid_timer, jiffies +
3999                   qpriv->tid_timer_timeout_jiffies);
4000 }
4001
4002 static int hfi1_stop_tid_reap_timer(struct rvt_qp *qp)
4003 {
4004         struct hfi1_qp_priv *qpriv = qp->priv;
4005         int rval = 0;
4006
4007         lockdep_assert_held(&qp->s_lock);
4008         if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
4009                 rval = del_timer(&qpriv->s_tid_timer);
4010                 qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
4011         }
4012         return rval;
4013 }
4014
4015 void hfi1_del_tid_reap_timer(struct rvt_qp *qp)
4016 {
4017         struct hfi1_qp_priv *qpriv = qp->priv;
4018
4019         del_timer_sync(&qpriv->s_tid_timer);
4020         qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
4021 }
4022
4023 static void hfi1_tid_timeout(struct timer_list *t)
4024 {
4025         struct hfi1_qp_priv *qpriv = from_timer(qpriv, t, s_tid_timer);
4026         struct rvt_qp *qp = qpriv->owner;
4027         struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
4028         unsigned long flags;
4029         u32 i;
4030
4031         spin_lock_irqsave(&qp->r_lock, flags);
4032         spin_lock(&qp->s_lock);
4033         if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
4034                 dd_dev_warn(dd_from_ibdev(qp->ibqp.device), "[QP%u] %s %d\n",
4035                             qp->ibqp.qp_num, __func__, __LINE__);
4036                 trace_hfi1_msg_tid_timeout(/* msg */
4037                         qp, "resource timeout = ",
4038                         (u64)qpriv->tid_timer_timeout_jiffies);
4039                 hfi1_stop_tid_reap_timer(qp);
4040                 /*
4041                  * Go through the entire ack queue and clear any outstanding
4042                  * HW flow and RcvArray resources.
4043                  */
4044                 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
4045                 for (i = 0; i < rvt_max_atomic(rdi); i++) {
4046                         struct tid_rdma_request *req =
4047                                 ack_to_tid_req(&qp->s_ack_queue[i]);
4048
4049                         hfi1_kern_exp_rcv_clear_all(req);
4050                 }
4051                 spin_unlock(&qp->s_lock);
4052                 if (qp->ibqp.event_handler) {
4053                         struct ib_event ev;
4054
4055                         ev.device = qp->ibqp.device;
4056                         ev.element.qp = &qp->ibqp;
4057                         ev.event = IB_EVENT_QP_FATAL;
4058                         qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
4059                 }
4060                 rvt_rc_error(qp, IB_WC_RESP_TIMEOUT_ERR);
4061                 goto unlock_r_lock;
4062         }
4063         spin_unlock(&qp->s_lock);
4064 unlock_r_lock:
4065         spin_unlock_irqrestore(&qp->r_lock, flags);
4066 }
4067
4068 void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
4069 {
4070         /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requestor side) */
4071
4072         /*
4073          * 1. Find matching SWQE
4074          * 2. Check that TIDENTRY array has enough space for a complete
4075          *    segment. If not, put QP in error state.
4076          * 3. Save response data in struct tid_rdma_req and struct tid_rdma_flow
4077          * 4. Remove HFI1_S_WAIT_TID_RESP from s_flags.
4078          * 5. Set qp->s_state
4079          * 6. Kick the send engine (hfi1_schedule_send())
4080          */
4081         struct ib_other_headers *ohdr = packet->ohdr;
4082         struct rvt_qp *qp = packet->qp;
4083         struct hfi1_qp_priv *qpriv = qp->priv;
4084         struct hfi1_ctxtdata *rcd = packet->rcd;
4085         struct rvt_swqe *wqe;
4086         struct tid_rdma_request *req;
4087         struct tid_rdma_flow *flow;
4088         enum ib_wc_status status;
4089         u32 opcode, aeth, psn, flow_psn, i, tidlen = 0, pktlen;
4090         bool fecn;
4091         unsigned long flags;
4092
4093         fecn = process_ecn(qp, packet);
4094         psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4095         aeth = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.aeth);
4096         opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
4097
4098         spin_lock_irqsave(&qp->s_lock, flags);
4099
4100         /* Ignore invalid responses */
4101         if (cmp_psn(psn, qp->s_next_psn) >= 0)
4102                 goto ack_done;
4103
4104         /* Ignore duplicate responses. */
4105         if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0))
4106                 goto ack_done;
4107
4108         if (unlikely(qp->s_acked == qp->s_tail))
4109                 goto ack_done;
4110
4111         /*
4112          * If we are waiting for a particular packet sequence number
4113          * due to a request being resent, check for it. Otherwise,
4114          * ensure that we haven't missed anything.
4115          */
4116         if (qp->r_flags & RVT_R_RDMAR_SEQ) {
4117                 if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
4118                         goto ack_done;
4119                 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
4120         }
4121
4122         wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
4123         if (unlikely(wqe->wr.opcode != IB_WR_TID_RDMA_WRITE))
4124                 goto ack_op_err;
4125
4126         req = wqe_to_tid_req(wqe);
4127         /*
4128          * If we've lost ACKs and our acked_tail pointer is too far
4129          * behind, don't overwrite segments. Just drop the packet and
4130          * let the reliability protocol take care of it.
4131          */
4132         if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS))
4133                 goto ack_done;
4134
4135         /*
4136          * The call to do_rc_ack() should be last in the chain of
4137          * packet checks because it will end up updating the QP state.
4138          * Therefore, anything that would prevent the packet from
4139          * being accepted as a successful response should be prior
4140          * to it.
4141          */
4142         if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
4143                 goto ack_done;
4144
4145         trace_hfi1_ack(qp, psn);
4146
4147         flow = &req->flows[req->setup_head];
4148         flow->pkt = 0;
4149         flow->tid_idx = 0;
4150         flow->tid_offset = 0;
4151         flow->sent = 0;
4152         flow->resync_npkts = 0;
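        /*
         * tid_flow_qp packs the remote KDETH QP number, the hardware flow
         * index (the TID_RDMA_DESTQP_FLOW_* fields), and the receive
         * context; extract the flow index for later use.
         */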
4153         flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp);
4154         flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
4155                 TID_RDMA_DESTQP_FLOW_MASK;
4156         flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_psn));
4157         flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
4158         flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
4159         flow->flow_state.resp_ib_psn = psn;
4160         flow->length = min_t(u32, req->seg_len,
4161                              (wqe->length - (req->comp_seg * req->seg_len)));
4162
4163         flow->npkts = rvt_div_round_up_mtu(qp, flow->length);
4164         flow->flow_state.lpsn = flow->flow_state.spsn +
4165                 flow->npkts - 1;
4166         /* payload length = packet length - (header length + ICRC length) */
4167         pktlen = packet->tlen - (packet->hlen + 4);
4168         if (pktlen > sizeof(flow->tid_entry)) {
4169                 status = IB_WC_LOC_LEN_ERR;
4170                 goto ack_err;
4171         }
4172         memcpy(flow->tid_entry, packet->ebuf, pktlen);
4173         flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
4174         trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);
4175
4176         req->comp_seg++;
4177         trace_hfi1_tid_write_sender_rcv_resp(qp, 0);
4178         /*
4179          * Walk the TID_ENTRY list to make sure we have enough space for a
4180          * complete segment.
4181          */
4182         for (i = 0; i < flow->tidcnt; i++) {
4183                 trace_hfi1_tid_entry_rcv_write_resp(/* entry */
4184                         qp, i, flow->tid_entry[i]);
4185                 if (!EXP_TID_GET(flow->tid_entry[i], LEN)) {
4186                         status = IB_WC_LOC_LEN_ERR;
4187                         goto ack_err;
4188                 }
4189                 tidlen += EXP_TID_GET(flow->tid_entry[i], LEN);
4190         }
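        /* EXP_TID LEN counts pages; the TID entries must cover the segment */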
4191         if (tidlen * PAGE_SIZE < flow->length) {
4192                 status = IB_WC_LOC_LEN_ERR;
4193                 goto ack_err;
4194         }
4195
4196         trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn,
4197                                           wqe->lpsn, req);
4198         /*
4199          * If this is the first response for this request, set the initial
4200          * flow index to the current flow.
4201          */
4202         if (!cmp_psn(psn, wqe->psn)) {
4203                 req->r_last_acked = mask_psn(wqe->psn - 1);
4204                 /* Set acked flow index to head index */
4205                 req->acked_tail = req->setup_head;
4206         }
4207
4208         /* advance circular buffer head */
4209         req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS);
4210         req->state = TID_REQUEST_ACTIVE;
4211
4212         /*
4213          * If all responses for this TID RDMA WRITE request have been
4214          * received, advance the pointer to the next one.
4215          * Since TID RDMA requests could be mixed in with regular IB requests,
4216          * they might not appear sequentially in the queue. Therefore, the
4217          * next request needs to be "found".
4218          */
4219         if (qpriv->s_tid_cur != qpriv->s_tid_head &&
4220             req->comp_seg == req->total_segs) {
4221                 for (i = qpriv->s_tid_cur + 1; ; i++) {
4222                         if (i == qp->s_size)
4223                                 i = 0;
4224                         wqe = rvt_get_swqe_ptr(qp, i);
4225                         if (i == qpriv->s_tid_head)
4226                                 break;
4227                         if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
4228                                 break;
4229                 }
4230                 qpriv->s_tid_cur = i;
4231         }
4232         qp->s_flags &= ~HFI1_S_WAIT_TID_RESP;
4233         hfi1_schedule_tid_send(qp);
4234         goto ack_done;
4235
4236 ack_op_err:
4237         status = IB_WC_LOC_QP_OP_ERR;
4238 ack_err:
4239         rvt_error_qp(qp, status);
4240 ack_done:
4241         if (fecn)
4242                 qp->s_flags |= RVT_S_ECN;
4243         spin_unlock_irqrestore(&qp->s_lock, flags);
4244 }
4245
4246 bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
4247                                 struct ib_other_headers *ohdr,
4248                                 u32 *bth1, u32 *bth2, u32 *len)
4249 {
4250         struct tid_rdma_request *req = wqe_to_tid_req(wqe);
4251         struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
4252         struct tid_rdma_params *remote;
4253         struct rvt_qp *qp = req->qp;
4254         struct hfi1_qp_priv *qpriv = qp->priv;
4255         u32 tidentry = flow->tid_entry[flow->tid_idx];
4256         u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
4257         struct tid_rdma_write_data *wd = &ohdr->u.tid_rdma.w_data;
4258         u32 next_offset, om = KDETH_OM_LARGE;
4259         bool last_pkt;
4260
4261         if (!tidlen) {
4262                 hfi1_trdma_send_complete(qp, wqe, IB_WC_REM_INV_RD_REQ_ERR);
4263                 rvt_error_qp(qp, IB_WC_REM_INV_RD_REQ_ERR);
4264         }
4265
4266         *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
4267         flow->sent += *len;
4268         next_offset = flow->tid_offset + *len;
4269         last_pkt = (flow->tid_idx == (flow->tidcnt - 1) &&
4270                     next_offset >= tidlen) || (flow->sent >= flow->length);
4271         trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry);
4272         trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);
4273
4274         rcu_read_lock();
4275         remote = rcu_dereference(qpriv->tid_rdma.remote);
4276         KDETH_RESET(wd->kdeth0, KVER, 0x1);
4277         KDETH_SET(wd->kdeth0, SH, !last_pkt);
4278         KDETH_SET(wd->kdeth0, INTR, !!(!last_pkt && remote->urg));
4279         KDETH_SET(wd->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
4280         KDETH_SET(wd->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
4281         KDETH_SET(wd->kdeth0, OM, om == KDETH_OM_LARGE);
4282         KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om);
4283         KDETH_RESET(wd->kdeth1, JKEY, remote->jkey);
4284         wd->verbs_qp = cpu_to_be32(qp->remote_qpn);
4285         rcu_read_unlock();
4286
4287         *bth1 = flow->tid_qpn;
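        /*
         * Stamp the packet's KDETH PSN: the flow's next sequence number
         * in the low bits, qualified by the flow's current generation.
         */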
4288         *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
4289                          HFI1_KDETH_BTH_SEQ_MASK) |
4290                          (flow->flow_state.generation <<
4291                           HFI1_KDETH_BTH_SEQ_SHIFT));
4292         if (last_pkt) {
4293                 /* PSNs are zero-based, so +1 to count the number of packets */
4294                 if (flow->flow_state.lpsn + 1 +
4295                     rvt_div_round_up_mtu(qp, req->seg_len) >
4296                     MAX_TID_FLOW_PSN)
4297                         req->state = TID_REQUEST_SYNC;
4298                 *bth2 |= IB_BTH_REQ_ACK;
4299         }
4300
4301         if (next_offset >= tidlen) {
4302                 flow->tid_offset = 0;
4303                 flow->tid_idx++;
4304         } else {
4305                 flow->tid_offset = next_offset;
4306         }
4307         return last_pkt;
4308 }
4309
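/* Responder-side handler for TID RDMA WRITE DATA packets. */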
4310 void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
4311 {
4312         struct rvt_qp *qp = packet->qp;
4313         struct hfi1_qp_priv *priv = qp->priv;
4314         struct hfi1_ctxtdata *rcd = priv->rcd;
4315         struct ib_other_headers *ohdr = packet->ohdr;
4316         struct rvt_ack_entry *e;
4317         struct tid_rdma_request *req;
4318         struct tid_rdma_flow *flow;
4319         struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
4320         unsigned long flags;
4321         u32 psn, next;
4322         u8 opcode;
4323         bool fecn;
4324
4325         fecn = process_ecn(qp, packet);
4326         psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4327         opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
4328
4329         /*
4330          * All error handling should be done by now. If we are here, the packet
4331          * is either good or has been accepted by the error handler.
4332          */
4333         spin_lock_irqsave(&qp->s_lock, flags);
4334         e = &qp->s_ack_queue[priv->r_tid_tail];
4335         req = ack_to_tid_req(e);
4336         flow = &req->flows[req->clear_tail];
4337         if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
4338                 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
4339
4340                 if (cmp_psn(psn, flow->flow_state.r_next_psn))
4341                         goto send_nak;
4342
4343                 flow->flow_state.r_next_psn = mask_psn(psn + 1);
4344                 /*
4345                  * Copy the payload to the destination buffer if this packet
4346                  * is delivered as an eager packet due to an RSM rule and FECN.
4347                  * The RSM rule matches on the FECN bit in the BTH and the SH
4348                  * bit in the KDETH header, and therefore will not match the
4349                  * last packet of each segment, which has the SH bit cleared.
4350                  */
4351                 if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
4352                         struct rvt_sge_state ss;
4353                         u32 len;
4354                         u32 tlen = packet->tlen;
4355                         u16 hdrsize = packet->hlen;
4356                         u8 pad = packet->pad;
4357                         u8 extra_bytes = pad + packet->extra_byte +
4358                                 (SIZE_OF_CRC << 2);
4359                         u32 pmtu = qp->pmtu;
4360
4361                         if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
4362                                 goto send_nak;
4363                         len = req->comp_seg * req->seg_len;
4364                         len += delta_psn(psn,
4365                                 full_flow_psn(flow, flow->flow_state.spsn)) *
4366                                 pmtu;
4367                         if (unlikely(req->total_len - len < pmtu))
4368                                 goto send_nak;
4369
4370                         /*
4371                          * The e->rdma_sge field is set when TID RDMA WRITE REQ
4372                          * is first received and is never modified thereafter.
4373                          */
4374                         ss.sge = e->rdma_sge;
4375                         ss.sg_list = NULL;
4376                         ss.num_sge = 1;
4377                         ss.total_len = req->total_len;
4378                         rvt_skip_sge(&ss, len, false);
4379                         rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
4380                                      false);
4381                         /* Raise the sw sequence check flag for next packet */
4382                         priv->r_next_psn_kdeth = mask_psn(psn + 1);
4383                         priv->s_flags |= HFI1_R_TID_SW_PSN;
4384                 }
4385                 goto exit;
4386         }
4387         flow->flow_state.r_next_psn = mask_psn(psn + 1);
4388         hfi1_kern_exp_rcv_clear(req);
4389         priv->alloc_w_segs--;
4390         rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK;
4391         req->comp_seg++;
4392         priv->s_nak_state = 0;
4393
4394         /*
4395          * Release the flow if one of the following conditions has been met:
4396          *  - The request has reached a sync point AND all outstanding
4397          *    segments have been completed, or
4398          *  - The entire request is complete and there are no more requests
4399          *    (of any kind) in the queue.
4400          */
4401         trace_hfi1_rsp_rcv_tid_write_data(qp, psn);
4402         trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
4403                                           req);
4404         trace_hfi1_tid_write_rsp_rcv_data(qp);
4405         if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
4406                 priv->r_tid_ack = priv->r_tid_tail;
4407
4408         if (opcode == TID_OP(WRITE_DATA_LAST)) {
4409                 release_rdma_sge_mr(e);
4410                 for (next = priv->r_tid_tail + 1; ; next++) {
4411                         if (next > rvt_size_atomic(&dev->rdi))
4412                                 next = 0;
4413                         if (next == priv->r_tid_head)
4414                                 break;
4415                         e = &qp->s_ack_queue[next];
4416                         if (e->opcode == TID_OP(WRITE_REQ))
4417                                 break;
4418                 }
4419                 priv->r_tid_tail = next;
4420                 if (++qp->s_acked_ack_queue > rvt_size_atomic(&dev->rdi))
4421                         qp->s_acked_ack_queue = 0;
4422         }
4423
4424         hfi1_tid_write_alloc_resources(qp, true);
4425
4426         /*
4427          * If we need to generate more responses, schedule the
4428          * send engine.
4429          */
4430         if (req->cur_seg < req->total_segs ||
4431             qp->s_tail_ack_queue != qp->r_head_ack_queue) {
4432                 qp->s_flags |= RVT_S_RESP_PENDING;
4433                 hfi1_schedule_send(qp);
4434         }
4435
4436         priv->pending_tid_w_segs--;
4437         if (priv->s_flags & HFI1_R_TID_RSC_TIMER) {
4438                 if (priv->pending_tid_w_segs)
4439                         hfi1_mod_tid_reap_timer(req->qp);
4440                 else
4441                         hfi1_stop_tid_reap_timer(req->qp);
4442         }
4443
4444 done:
4445         priv->s_flags |= RVT_S_ACK_PENDING;
4446         hfi1_schedule_tid_send(qp);
4447 exit:
4448         priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
4449         if (fecn)
4450                 qp->s_flags |= RVT_S_ECN;
4451         spin_unlock_irqrestore(&qp->s_lock, flags);
4452         return;
4453
4454 send_nak:
4455         if (!priv->s_nak_state) {
4456                 priv->s_nak_state = IB_NAK_PSN_ERROR;
4457                 priv->s_nak_psn = flow->flow_state.r_next_psn;
4458                 priv->s_flags |= RVT_S_ACK_PENDING;
4459                 if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
4460                         priv->r_tid_ack = priv->r_tid_tail;
4461                 hfi1_schedule_tid_send(qp);
4462         }
4463         goto done;
4464 }
4465
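/*
 * A RESYNC PSN is one whose sequence field (the low
 * HFI1_KDETH_BTH_SEQ_SHIFT bits) is all ones, i.e. the last PSN of a
 * generation; see hfi1_build_tid_rdma_resync().
 */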
4466 static bool hfi1_tid_rdma_is_resync_psn(u32 psn)
4467 {
4468         return (bool)((psn & HFI1_KDETH_BTH_SEQ_MASK) ==
4469                       HFI1_KDETH_BTH_SEQ_MASK);
4470 }
4471
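/*
 * Build a TID RDMA ACK packet. The PSN placed in bth2 depends on the QP's
 * state: a RESYNC being acknowledged, a NAK being reported, or a normal
 * ACK of the flow's last PSN.
 */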
4472 u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
4473                                   struct ib_other_headers *ohdr, u16 iflow,
4474                                   u32 *bth1, u32 *bth2)
4475 {
4476         struct hfi1_qp_priv *qpriv = qp->priv;
4477         struct tid_flow_state *fs = &qpriv->flow_state;
4478         struct tid_rdma_request *req = ack_to_tid_req(e);
4479         struct tid_rdma_flow *flow = &req->flows[iflow];
4480         struct tid_rdma_params *remote;
4481
4482         rcu_read_lock();
4483         remote = rcu_dereference(qpriv->tid_rdma.remote);
4484         KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
4485         ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
4486         *bth1 = remote->qp;
4487         rcu_read_unlock();
4488
4489         if (qpriv->resync) {
4490                 *bth2 = mask_psn((fs->generation <<
4491                                   HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
4492                 ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
4493         } else if (qpriv->s_nak_state) {
4494                 *bth2 = mask_psn(qpriv->s_nak_psn);
4495                 ohdr->u.tid_rdma.ack.aeth =
4496                         cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
4497                                     (qpriv->s_nak_state <<
4498                                      IB_AETH_CREDIT_SHIFT));
4499         } else {
4500                 *bth2 = full_flow_psn(flow, flow->flow_state.lpsn);
4501                 ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
4502         }
4503         KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);
4504         ohdr->u.tid_rdma.ack.tid_flow_qp =
4505                 cpu_to_be32(qpriv->tid_rdma.local.qp |
4506                             ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
4507                              TID_RDMA_DESTQP_FLOW_SHIFT) |
4508                             qpriv->rcd->ctxt);
4509
4510         ohdr->u.tid_rdma.ack.tid_flow_psn = 0;
4511         ohdr->u.tid_rdma.ack.verbs_psn =
4512                 cpu_to_be32(flow->flow_state.resp_ib_psn);
4513
4514         if (qpriv->resync) {
4515                 /*
4516                  * If the PSN before the currently expected KDETH PSN is the
4517                  * RESYNC PSN, then we never received a good TID RDMA WRITE
4518                  * DATA packet after a previous RESYNC.
4519                  * In this case, the next expected KDETH PSN stays the same.
4520                  */
4521                 if (hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1)) {
4522                         ohdr->u.tid_rdma.ack.tid_flow_psn =
4523                                 cpu_to_be32(qpriv->r_next_psn_kdeth_save);
4524                 } else {
4525                         /*
4526                          * Because the KDETH PSNs jump during a RESYNC, it's
4527                          * not possible to infer (or compute) the previous value
4528                          * of r_next_psn_kdeth in the case of back-to-back
4529                          * RESYNC packets. Therefore, we save it.
4530                          */
4531                         qpriv->r_next_psn_kdeth_save =
4532                                 qpriv->r_next_psn_kdeth - 1;
4533                         ohdr->u.tid_rdma.ack.tid_flow_psn =
4534                                 cpu_to_be32(qpriv->r_next_psn_kdeth_save);
4535                         qpriv->r_next_psn_kdeth = mask_psn(*bth2 + 1);
4536                 }
4537                 qpriv->resync = false;
4538         }
4539
4540         return sizeof(ohdr->u.tid_rdma.ack) / sizeof(u32);
4541 }
4542
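/*
 * Requester-side handler for TID RDMA ACK packets, including ACKs of
 * RESYNC requests and NAKs reporting PSN sequence errors.
 */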
4543 void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4544 {
4545         struct ib_other_headers *ohdr = packet->ohdr;
4546         struct rvt_qp *qp = packet->qp;
4547         struct hfi1_qp_priv *qpriv = qp->priv;
4548         struct rvt_swqe *wqe;
4549         struct tid_rdma_request *req;
4550         struct tid_rdma_flow *flow;
4551         u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn;
4552         unsigned long flags;
4553         u16 fidx;
4554
4555         trace_hfi1_tid_write_sender_rcv_tid_ack(qp, 0);
4556         process_ecn(qp, packet);
4557         psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4558         aeth = be32_to_cpu(ohdr->u.tid_rdma.ack.aeth);
4559         req_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.verbs_psn));
4560         resync_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.tid_flow_psn));
4561
4562         spin_lock_irqsave(&qp->s_lock, flags);
4563         trace_hfi1_rcv_tid_ack(qp, aeth, psn, req_psn, resync_psn);
4564
4565         /* If we are waiting for an ACK to RESYNC, drop any other packets */
4566         if ((qp->s_flags & HFI1_S_WAIT_HALT) &&
4567             cmp_psn(psn, qpriv->s_resync_psn))
4568                 goto ack_op_err;
4569
4570         ack_psn = req_psn;
4571         if (hfi1_tid_rdma_is_resync_psn(psn))
4572                 ack_kpsn = resync_psn;
4573         else
4574                 ack_kpsn = psn;
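        /*
         * A non-zero AETH code is a NAK; its PSN names the first
         * unacknowledged packet, so step back to treat only the packets
         * before it as acknowledged.
         */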
4575         if (aeth >> 29) {
4576                 ack_psn--;
4577                 ack_kpsn--;
4578         }
4579
4580         wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4581
4582         if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
4583                 goto ack_op_err;
4584
4585         req = wqe_to_tid_req(wqe);
4586         trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4587                                        wqe->lpsn, req);
4588         flow = &req->flows[req->acked_tail];
4589         trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4590
4591         /* Drop stale ACK/NAK */
4592         if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
4593                 goto ack_op_err;
4594
4595         while (cmp_psn(ack_kpsn,
4596                        full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 &&
4597                req->ack_seg < req->cur_seg) {
4598                 req->ack_seg++;
4599                 /* advance acked segment pointer */
4600                 req->acked_tail = CIRC_NEXT(req->acked_tail, MAX_FLOWS);
4601                 req->r_last_acked = flow->flow_state.resp_ib_psn;
4602                 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4603                                                wqe->lpsn, req);
4604                 if (req->ack_seg == req->total_segs) {
4605                         req->state = TID_REQUEST_COMPLETE;
4606                         wqe = do_rc_completion(qp, wqe,
4607                                                to_iport(qp->ibqp.device,
4608                                                         qp->port_num));
4609                         trace_hfi1_sender_rcv_tid_ack(qp);
4610                         atomic_dec(&qpriv->n_tid_requests);
4611                         if (qp->s_acked == qp->s_tail)
4612                                 break;
4613                         if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
4614                                 break;
4615                         req = wqe_to_tid_req(wqe);
4616                 }
4617                 flow = &req->flows[req->acked_tail];
4618                 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4619         }
4620
4621         trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4622                                        wqe->lpsn, req);
4623         switch (aeth >> 29) {
4624         case 0:         /* ACK */
4625                 if (qpriv->s_flags & RVT_S_WAIT_ACK)
4626                         qpriv->s_flags &= ~RVT_S_WAIT_ACK;
4627                 if (!hfi1_tid_rdma_is_resync_psn(psn)) {
4628                         /* Check if there is any pending TID ACK */
4629                         if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
4630                             req->ack_seg < req->cur_seg)
4631                                 hfi1_mod_tid_retry_timer(qp);
4632                         else
4633                                 hfi1_stop_tid_retry_timer(qp);
4634                         hfi1_schedule_send(qp);
4635                 } else {
4636                         u32 spsn, fpsn, last_acked, generation;
4637                         struct tid_rdma_request *rptr;
4638
4639                         /* ACK(RESYNC) */
4640                         hfi1_stop_tid_retry_timer(qp);
4641                         /* Allow new requests (see hfi1_make_tid_rdma_pkt) */
4642                         qp->s_flags &= ~HFI1_S_WAIT_HALT;
4643                         /*
4644                          * Clear the RVT_S_SEND_ONE flag in case the TID RDMA
4645                          * ACK is received after the TID retry timer has fired
4646                          * again. In this case, do not send any more TID
4647                          * RESYNC request or wait for any more TID ACK packet.
4648                          */
4649                         qpriv->s_flags &= ~RVT_S_SEND_ONE;
4650                         hfi1_schedule_send(qp);
4651
4652                         if ((qp->s_acked == qpriv->s_tid_tail &&
4653                              req->ack_seg == req->total_segs) ||
4654                             qp->s_acked == qp->s_tail) {
4655                                 qpriv->s_state = TID_OP(WRITE_DATA_LAST);
4656                                 goto done;
4657                         }
4658
4659                         if (req->ack_seg == req->comp_seg) {
4660                                 qpriv->s_state = TID_OP(WRITE_DATA);
4661                                 goto done;
4662                         }
4663
4664                         /*
4665                          * The PSN to start with is the next PSN after the
4666                          * RESYNC PSN.
4667                          */
4668                         psn = mask_psn(psn + 1);
4669                         generation = psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
4670                         spsn = 0;
4671
4672                         /*
4673                          * Update to the correct WQE when we get an ACK(RESYNC)
4674                          * in the middle of a request.
4675                          */
4676                         if (delta_psn(ack_psn, wqe->lpsn))
4677                                 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4678                         req = wqe_to_tid_req(wqe);
4679                         flow = &req->flows[req->acked_tail];
4680                         /*
4681                          * RESYNC re-numbers the PSN ranges of all remaining
4682                          * segments. Also, PSNs start from 0 in the middle of a
4683                          * segment and the first segment size is less than the
4684                          * default number of packets. flow->resync_npkts is used
4685                          * to track the number of packets from the start of the
4686                          * real segment to the point of 0 PSN after the RESYNC
4687                          * in order to later correctly rewind the SGE.
4688                          */
4689                         fpsn = full_flow_psn(flow, flow->flow_state.spsn);
4690                         req->r_ack_psn = psn;
4691                         flow->resync_npkts +=
4692                                 delta_psn(mask_psn(resync_psn + 1), fpsn);
4693                         /*
4694                          * Renumber all packet sequence number ranges
4695                          * based on the new generation.
4696                          */
4697                         last_acked = qp->s_acked;
4698                         rptr = req;
4699                         while (1) {
4700                                 /* start from last acked segment */
4701                                 for (fidx = rptr->acked_tail;
4702                                      CIRC_CNT(rptr->setup_head, fidx,
4703                                               MAX_FLOWS);
4704                                      fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
4705                                         u32 lpsn;
4706                                         u32 gen;
4707
4708                                         flow = &rptr->flows[fidx];
4709                                         gen = flow->flow_state.generation;
4710                                         if (WARN_ON(gen == generation &&
4711                                                     flow->flow_state.spsn !=
4712                                                      spsn))
4713                                                 continue;
4714                                         lpsn = flow->flow_state.lpsn;
4715                                         lpsn = full_flow_psn(flow, lpsn);
4716                                         flow->npkts =
4717                                                 delta_psn(lpsn,
4718                                                           mask_psn(resync_psn)
4719                                                           );
4720                                         flow->flow_state.generation =
4721                                                 generation;
4722                                         flow->flow_state.spsn = spsn;
4723                                         flow->flow_state.lpsn =
4724                                                 flow->flow_state.spsn +
4725                                                 flow->npkts - 1;
4726                                         flow->pkt = 0;
4727                                         spsn += flow->npkts;
4728                                         resync_psn += flow->npkts;
4729                                         trace_hfi1_tid_flow_rcv_tid_ack(qp,
4730                                                                         fidx,
4731                                                                         flow);
4732                                 }
4733                                 if (++last_acked == qpriv->s_tid_cur + 1)
4734                                         break;
4735                                 if (last_acked == qp->s_size)
4736                                         last_acked = 0;
4737                                 wqe = rvt_get_swqe_ptr(qp, last_acked);
4738                                 rptr = wqe_to_tid_req(wqe);
4739                         }
4740                         req->cur_seg = req->ack_seg;
4741                         qpriv->s_tid_tail = qp->s_acked;
4742                         qpriv->s_state = TID_OP(WRITE_REQ);
4743                         hfi1_schedule_tid_send(qp);
4744                 }
4745 done:
4746                 qpriv->s_retry = qp->s_retry_cnt;
4747                 break;
4748
4749         case 3:         /* NAK */
4750                 hfi1_stop_tid_retry_timer(qp);
4751                 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
4752                         IB_AETH_CREDIT_MASK) {
4753                 case 0: /* PSN sequence error */
4754                         flow = &req->flows[req->acked_tail];
4755                         trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
4756                                                         flow);
4757                         req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4758                         req->cur_seg = req->ack_seg;
4759                         qpriv->s_tid_tail = qp->s_acked;
4760                         qpriv->s_state = TID_OP(WRITE_REQ);
4761                         qpriv->s_retry = qp->s_retry_cnt;
4762                         hfi1_schedule_tid_send(qp);
4763                         break;
4764
4765                 default:
4766                         break;
4767                 }
4768                 break;
4769
4770         default:
4771                 break;
4772         }
4773
4774 ack_op_err:
4775         spin_unlock_irqrestore(&qp->s_lock, flags);
4776 }
4777
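/*
 * TID "retry" timer: armed on the requester while TID RDMA ACKs are
 * outstanding. On expiry (see hfi1_tid_retry_timeout() below) the QP
 * either sends a RESYNC to re-synchronize with the responder or, once
 * its retries are exhausted, is flushed with an error completion.
 */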
4778 void hfi1_add_tid_retry_timer(struct rvt_qp *qp)
4779 {
4780         struct hfi1_qp_priv *priv = qp->priv;
4781         struct ib_qp *ibqp = &qp->ibqp;
4782         struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
4783
4784         lockdep_assert_held(&qp->s_lock);
4785         if (!(priv->s_flags & HFI1_S_TID_RETRY_TIMER)) {
4786                 priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
4787                 priv->s_tid_retry_timer.expires = jiffies +
4788                         priv->tid_retry_timeout_jiffies + rdi->busy_jiffies;
4789                 add_timer(&priv->s_tid_retry_timer);
4790         }
4791 }
4792
4793 static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp)
4794 {
4795         struct hfi1_qp_priv *priv = qp->priv;
4796         struct ib_qp *ibqp = &qp->ibqp;
4797         struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
4798
4799         lockdep_assert_held(&qp->s_lock);
4800         priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
4801         mod_timer(&priv->s_tid_retry_timer, jiffies +
4802                   priv->tid_retry_timeout_jiffies + rdi->busy_jiffies);
4803 }
4804
4805 static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp)
4806 {
4807         struct hfi1_qp_priv *priv = qp->priv;
4808         int rval = 0;
4809
4810         lockdep_assert_held(&qp->s_lock);
4811         if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
4812                 rval = del_timer(&priv->s_tid_retry_timer);
4813                 priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
4814         }
4815         return rval;
4816 }
4817
4818 void hfi1_del_tid_retry_timer(struct rvt_qp *qp)
4819 {
4820         struct hfi1_qp_priv *priv = qp->priv;
4821
4822         del_timer_sync(&priv->s_tid_retry_timer);
4823         priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
4824 }
4825
4826 static void hfi1_tid_retry_timeout(struct timer_list *t)
4827 {
4828         struct hfi1_qp_priv *priv = from_timer(priv, t, s_tid_retry_timer);
4829         struct rvt_qp *qp = priv->owner;
4830         struct rvt_swqe *wqe;
4831         unsigned long flags;
4832         struct tid_rdma_request *req;
4833
4834         spin_lock_irqsave(&qp->r_lock, flags);
4835         spin_lock(&qp->s_lock);
4836         trace_hfi1_tid_write_sender_retry_timeout(qp, 0);
4837         if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
4838                 hfi1_stop_tid_retry_timer(qp);
4839                 if (!priv->s_retry) {
4840                         trace_hfi1_msg_tid_retry_timeout(/* msg */
4841                                 qp,
4842                                 "Exhausted retries. Tid retry timeout = ",
4843                                 (u64)priv->tid_retry_timeout_jiffies);
4844
4845                         wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4846                         hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
4847                         rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
4848                 } else {
4849                         wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4850                         req = wqe_to_tid_req(wqe);
4851                         trace_hfi1_tid_req_tid_retry_timeout(/* req */
4852                            qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);
4853
4854                         priv->s_flags &= ~RVT_S_WAIT_ACK;
4855                         /* Only send one packet (the RESYNC) */
4856                         priv->s_flags |= RVT_S_SEND_ONE;
4857                         /*
4858                          * No additional request shall be made by this QP until
4859                          * the RESYNC has completed.
4860                          */
4861                         qp->s_flags |= HFI1_S_WAIT_HALT;
4862                         priv->s_state = TID_OP(RESYNC);
4863                         priv->s_retry--;
4864                         hfi1_schedule_tid_send(qp);
4865                 }
4866         }
4867         spin_unlock(&qp->s_lock);
4868         spin_unlock_irqrestore(&qp->r_lock, flags);
4869 }
4870
4871 u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
4872                                struct ib_other_headers *ohdr, u32 *bth1,
4873                                u32 *bth2, u16 fidx)
4874 {
4875         struct hfi1_qp_priv *qpriv = qp->priv;
4876         struct tid_rdma_params *remote;
4877         struct tid_rdma_request *req = wqe_to_tid_req(wqe);
4878         struct tid_rdma_flow *flow = &req->flows[fidx];
4879         u32 generation;
4880
4881         rcu_read_lock();
4882         remote = rcu_dereference(qpriv->tid_rdma.remote);
4883         KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
4884         ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
4885         *bth1 = remote->qp;
4886         rcu_read_unlock();
4887
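        /*
         * Advertise the next flow generation in a PSN whose sequence
         * field is all ones -- the RESYNC PSN recognized by
         * hfi1_tid_rdma_is_resync_psn() on the receive side.
         */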
4888         generation = kern_flow_generation_next(flow->flow_state.generation);
4889         *bth2 = mask_psn((generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
4890         qpriv->s_resync_psn = *bth2;
4891         *bth2 |= IB_BTH_REQ_ACK;
4892         KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);
4893
4894         return sizeof(ohdr->u.tid_rdma.resync) / sizeof(u32);
4895 }
4896
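/*
 * Responder-side handler for TID RDMA RESYNC packets: reprogram the
 * hardware flow with the requested generation and renumber the PSN
 * ranges of all segments after the last one received.
 */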
4897 void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
4898 {
4899         struct ib_other_headers *ohdr = packet->ohdr;
4900         struct rvt_qp *qp = packet->qp;
4901         struct hfi1_qp_priv *qpriv = qp->priv;
4902         struct hfi1_ctxtdata *rcd = qpriv->rcd;
4903         struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
4904         struct rvt_ack_entry *e;
4905         struct tid_rdma_request *req;
4906         struct tid_rdma_flow *flow;
4907         struct tid_flow_state *fs = &qpriv->flow_state;
4908         u32 psn, generation, idx, gen_next;
4909         bool fecn;
4910         unsigned long flags;
4911
4912         fecn = process_ecn(qp, packet);
4913         psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4914
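        /*
         * The RESYNC PSN has all sequence bits set, so psn + 1 carries
         * into the generation field, yielding the "next" generation the
         * requester is asking for.
         */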
4915         generation = mask_psn(psn + 1) >> HFI1_KDETH_BTH_SEQ_SHIFT;
4916         spin_lock_irqsave(&qp->s_lock, flags);
4917
4918         gen_next = (fs->generation == KERN_GENERATION_RESERVED) ?
4919                 generation : kern_flow_generation_next(fs->generation);
4920         /*
4921          * A RESYNC packet contains the "next" generation and can only be
4922          * from the current or previous generations
4923          */
4924         if (generation != mask_generation(gen_next - 1) &&
4925             generation != gen_next)
4926                 goto bail;
4927         /* Already processing a resync */
4928         if (qpriv->resync)
4929                 goto bail;
4930
4931         spin_lock(&rcd->exp_lock);
4932         if (fs->index >= RXE_NUM_TID_FLOWS) {
4933                 /*
4934                  * If we don't have a flow, save the generation so it can be
4935                  * applied when a new flow is allocated
4936                  */
4937                 fs->generation = generation;
4938         } else {
4939                 /* Reprogram the QP flow with new generation */
4940                 rcd->flows[fs->index].generation = generation;
4941                 fs->generation = kern_setup_hw_flow(rcd, fs->index);
4942         }
4943         fs->psn = 0;
4944         /*
4945          * Disable SW PSN checking since a RESYNC is equivalent to a
4946          * sync point and the flow has been/will be reprogrammed
4947          */
4948         qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
4949         trace_hfi1_tid_write_rsp_rcv_resync(qp);
4950
4951         /*
4952          * Reset all TID flow information with the new generation.
4953          * This is done for all requests and segments after the
4954          * last received segment
4955          */
4956         for (idx = qpriv->r_tid_tail; ; idx++) {
4957                 u16 flow_idx;
4958
4959                 if (idx > rvt_size_atomic(&dev->rdi))
4960                         idx = 0;
4961                 e = &qp->s_ack_queue[idx];
4962                 if (e->opcode == TID_OP(WRITE_REQ)) {
4963                         req = ack_to_tid_req(e);
4964                         trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn,
4965                                                       e->lpsn, req);
4966
4967                         /* start from last unacked segment */
4968                         for (flow_idx = req->clear_tail;
4969                              CIRC_CNT(req->setup_head, flow_idx,
4970                                       MAX_FLOWS);
4971                              flow_idx = CIRC_NEXT(flow_idx, MAX_FLOWS)) {
4972                                 u32 lpsn;
4973                                 u32 next;
4974
4975                                 flow = &req->flows[flow_idx];
4976                                 lpsn = full_flow_psn(flow,
4977                                                      flow->flow_state.lpsn);
4978                                 next = flow->flow_state.r_next_psn;
4979                                 flow->npkts = delta_psn(lpsn, next - 1);
4980                                 flow->flow_state.generation = fs->generation;
4981                                 flow->flow_state.spsn = fs->psn;
4982                                 flow->flow_state.lpsn =
4983                                         flow->flow_state.spsn + flow->npkts - 1;
4984                                 flow->flow_state.r_next_psn =
4985                                         full_flow_psn(flow,
4986                                                       flow->flow_state.spsn);
4987                                 fs->psn += flow->npkts;
4988                                 trace_hfi1_tid_flow_rcv_resync(qp, flow_idx,
4989                                                                flow);
4990                         }
4991                 }
4992                 if (idx == qp->s_tail_ack_queue)
4993                         break;
4994         }
4995
4996         spin_unlock(&rcd->exp_lock);
4997         qpriv->resync = true;
4998         /* RESYNC request always gets a TID RDMA ACK. */
4999         qpriv->s_nak_state = 0;
5000         qpriv->s_flags |= RVT_S_ACK_PENDING;
5001         hfi1_schedule_tid_send(qp);
5002 bail:
5003         if (fecn)
5004                 qp->s_flags |= RVT_S_ECN;
5005         spin_unlock_irqrestore(&qp->s_lock, flags);
5006 }
5007
5008 /*
5009  * Call this function when the last TID RDMA WRITE DATA packet for a request
5010  * is built.
5011  */
5012 static void update_tid_tail(struct rvt_qp *qp)
5013         __must_hold(&qp->s_lock)
5014 {
5015         struct hfi1_qp_priv *priv = qp->priv;
5016         u32 i;
5017         struct rvt_swqe *wqe;
5018
5019         lockdep_assert_held(&qp->s_lock);
5020         /* Can't move beyond s_tid_cur */
5021         if (priv->s_tid_tail == priv->s_tid_cur)
5022                 return;
5023         for (i = priv->s_tid_tail + 1; ; i++) {
5024                 if (i == qp->s_size)
5025                         i = 0;
5026
5027                 if (i == priv->s_tid_cur)
5028                         break;
5029                 wqe = rvt_get_swqe_ptr(qp, i);
5030                 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
5031                         break;
5032         }
5033         priv->s_tid_tail = i;
5034         priv->s_state = TID_OP(WRITE_RESP);
5035 }
5036
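/*
 * Build the next TID RDMA packet (WRITE DATA, RESYNC, or a TID ACK) for
 * this QP. Returns 1 if a packet was built, 0 otherwise.
 */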
5037 int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
5038         __must_hold(&qp->s_lock)
5039 {
5040         struct hfi1_qp_priv *priv = qp->priv;
5041         struct rvt_swqe *wqe;
5042         u32 bth1 = 0, bth2 = 0, hwords = 5, len, middle = 0;
5043         struct ib_other_headers *ohdr;
5044         struct rvt_sge_state *ss = &qp->s_sge;
5045         struct rvt_ack_entry *e = &qp->s_ack_queue[qp->s_tail_ack_queue];
5046         struct tid_rdma_request *req = ack_to_tid_req(e);
5047         bool last = false;
5048         u8 opcode = TID_OP(WRITE_DATA);
5049
5050         lockdep_assert_held(&qp->s_lock);
5051         trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
5052         /*
5053          * Prioritize the sending of the requests and responses over the
5054          * sending of the TID RDMA data packets.
5055          */
5056         if (((atomic_read(&priv->n_tid_requests) < HFI1_TID_RDMA_WRITE_CNT) &&
5057              atomic_read(&priv->n_requests) &&
5058              !(qp->s_flags & (RVT_S_BUSY | RVT_S_WAIT_ACK |
5059                              HFI1_S_ANY_WAIT_IO))) ||
5060             (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg &&
5061              !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)))) {
5062                 struct iowait_work *iowork;
5063
5064                 iowork = iowait_get_ib_work(&priv->s_iowait);
5065                 ps->s_txreq = get_waiting_verbs_txreq(iowork);
5066                 if (ps->s_txreq || hfi1_make_rc_req(qp, ps)) {
5067                         priv->s_flags |= HFI1_S_TID_BUSY_SET;
5068                         return 1;
5069                 }
5070         }
5071
5072         ps->s_txreq = get_txreq(ps->dev, qp);
5073         if (!ps->s_txreq)
5074                 goto bail_no_tx;
5075
5076         ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
5077
5078         if ((priv->s_flags & RVT_S_ACK_PENDING) &&
5079             make_tid_rdma_ack(qp, ohdr, ps))
5080                 return 1;
5081
5082         /*
5083          * Bail out if we can't send data.
5084          * Note that this check must be done after the call to
5085          * make_tid_rdma_ack() because the responding QP could be in
5086          * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
5087          */
5088         if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
5089                 goto bail;
5090
5091         if (priv->s_flags & RVT_S_WAIT_ACK)
5092                 goto bail;
5093
5094         /* Check whether there is anything to do. */
5095         if (priv->s_tid_tail == HFI1_QP_WQE_INVALID)
5096                 goto bail;
5097         wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
5098         req = wqe_to_tid_req(wqe);
5099         trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn,
5100                                         wqe->lpsn, req);
5101         switch (priv->s_state) {
5102         case TID_OP(WRITE_REQ):
5103         case TID_OP(WRITE_RESP):
5104                 priv->tid_ss.sge = wqe->sg_list[0];
5105                 priv->tid_ss.sg_list = wqe->sg_list + 1;
5106                 priv->tid_ss.num_sge = wqe->wr.num_sge;
5107                 priv->tid_ss.total_len = wqe->length;
5108
5109                 if (priv->s_state == TID_OP(WRITE_REQ))
5110                         hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
5111                 priv->s_state = TID_OP(WRITE_DATA);
5112                 /* fall through */
5113
5114         case TID_OP(WRITE_DATA):
5115                 /*
5116                  * 1. Check whether a TID RDMA WRITE RESP is available.
5117                  * 2. If no:
5118                  *    2.1 If there are more segments and no TID RDMA WRITE RESP,
5119                  *        set HFI1_S_WAIT_TID_RESP
5120                  *    2.2 Return indicating no progress made.
5121                  * 3. If yes:
5122                  *    3.1 Build TID RDMA WRITE DATA packet.
5123                  *    3.2 If last packet in segment:
5124                  *        3.2.1 Change KDETH header bits
5125                  *        3.2.2 Advance RESP pointers.
5126                  *    3.3 Return indicating progress made.
5127                  */
5128                 trace_hfi1_sender_make_tid_pkt(qp);
5129                 trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
5130                 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
5131                 req = wqe_to_tid_req(wqe);
5132                 len = wqe->length;
5133
5134                 if (!req->comp_seg || req->cur_seg == req->comp_seg)
5135                         goto bail;
5136
5137                 trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode,
5138                                                 wqe->psn, wqe->lpsn, req);
5139                 last = hfi1_build_tid_rdma_packet(wqe, ohdr, &bth1, &bth2,
5140                                                   &len);
5141
5142                 if (last) {
5143                         /* move pointer to next flow */
5144                         req->clear_tail = CIRC_NEXT(req->clear_tail,
5145                                                     MAX_FLOWS);
5146                         if (++req->cur_seg < req->total_segs) {
5147                                 if (!CIRC_CNT(req->setup_head, req->clear_tail,
5148                                               MAX_FLOWS))
5149                                         qp->s_flags |= HFI1_S_WAIT_TID_RESP;
5150                         } else {
5151                                 priv->s_state = TID_OP(WRITE_DATA_LAST);
5152                                 opcode = TID_OP(WRITE_DATA_LAST);
5153
5154                                 /* Advance the s_tid_tail now */
5155                                 update_tid_tail(qp);
5156                         }
5157                 }
5158                 hwords += sizeof(ohdr->u.tid_rdma.w_data) / sizeof(u32);
5159                 ss = &priv->tid_ss;
5160                 break;
5161
5162         case TID_OP(RESYNC):
5163                 trace_hfi1_sender_make_tid_pkt(qp);
5164                 /* Use generation from the most recently received response */
5165                 wqe = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
5166                 req = wqe_to_tid_req(wqe);
5167                 /* If no responses for this WQE look at the previous one */
5168                 if (!req->comp_seg) {
5169                         wqe = rvt_get_swqe_ptr(qp,
5170                                                (!priv->s_tid_cur ? qp->s_size :
5171                                                 priv->s_tid_cur) - 1);
5172                         req = wqe_to_tid_req(wqe);
5173                 }
5174                 hwords += hfi1_build_tid_rdma_resync(qp, wqe, ohdr, &bth1,
5175                                                      &bth2,
5176                                                      CIRC_PREV(req->setup_head,
5177                                                                MAX_FLOWS));
5178                 ss = NULL;
5179                 len = 0;
5180                 opcode = TID_OP(RESYNC);
5181                 break;
5182
5183         default:
5184                 goto bail;
5185         }
5186         if (priv->s_flags & RVT_S_SEND_ONE) {
5187                 priv->s_flags &= ~RVT_S_SEND_ONE;
5188                 priv->s_flags |= RVT_S_WAIT_ACK;
5189                 bth2 |= IB_BTH_REQ_ACK;
5190         }
5191         qp->s_len -= len;
5192         ps->s_txreq->hdr_dwords = hwords;
5193         ps->s_txreq->sde = priv->s_sde;
5194         ps->s_txreq->ss = ss;
5195         ps->s_txreq->s_cur_size = len;
5196         hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
5197                              middle, ps);
5198         return 1;
5199 bail:
5200         hfi1_put_txreq(ps->s_txreq);
5201 bail_no_tx:
5202         ps->s_txreq = NULL;
5203         priv->s_flags &= ~RVT_S_BUSY;
5204         /*
5205          * If we didn't get a txreq, the QP will be woken up later to try
5206          * again. Set the iowait flag to indicate which work item should
5207          * wake it up.
5208          * (A better algorithm should be found to do this and generalize the
5209          * sleep/wakeup flags.)
5210          */
5211         iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
5212         return 0;
5213 }
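
/*
 * Illustrative sketch, not part of the driver logic above: how the
 * TID_OP(WRITE_DATA) arm decides whether it must wait for more TID
 * resources. CIRC_CNT() measures the circular distance from clear_tail
 * (the next flow to send from) to setup_head (the first flow without
 * programmed resources), so a count of zero means every prepared flow
 * has been consumed and the sender sets HFI1_S_WAIT_TID_RESP until the
 * responder supplies more. The helper name tid_flows_ready() is
 * hypothetical; the real code inlines this test.
 */
static inline bool tid_flows_ready(struct tid_rdma_request *req)
{
	return !!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS);
}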
5214
5215 static int make_tid_rdma_ack(struct rvt_qp *qp,
5216                              struct ib_other_headers *ohdr,
5217                              struct hfi1_pkt_state *ps)
5218 {
5219         struct rvt_ack_entry *e;
5220         struct hfi1_qp_priv *qpriv = qp->priv;
5221         struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
5222         u32 hwords, next;
5223         u32 len = 0;
5224         u32 bth1 = 0, bth2 = 0;
5225         int middle = 0;
5226         u16 flow;
5227         struct tid_rdma_request *req, *nreq;
5228
5229         trace_hfi1_tid_write_rsp_make_tid_ack(qp);
5230         /* Don't send an ACK if we aren't supposed to. */
5231         if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
5232                 goto bail;
5233
5234         /* header size in 32-bit words LRH+BTH = (8+12)/4. */
5235         hwords = 5;
5236
5237         e = &qp->s_ack_queue[qpriv->r_tid_ack];
5238         req = ack_to_tid_req(e);
5239         /*
5240          * In the RESYNC case, we are exactly one segment past the
5241          * previously sent ack or at the previously sent NAK. So to send
5242          * the resync ack, we go back one segment (which might be part of
5243          * the previous request) and let the do-while loop execute again.
5244          * The advantage of executing the do-while loop is that any data
5245          * received after the previous ack is automatically acked in the
5246          * RESYNC ack. It turns out that for the do-while loop we only need
5247          * to pull back qpriv->r_tid_ack, not the segment
5248          * indices/counters. The scheme works even if the previous request
5249          * was not a TID WRITE request.
5250          */
5251         if (qpriv->resync) {
5252                 if (!req->ack_seg || req->ack_seg == req->total_segs)
5253                         qpriv->r_tid_ack = !qpriv->r_tid_ack ?
5254                                 rvt_size_atomic(&dev->rdi) :
5255                                 qpriv->r_tid_ack - 1;
5256                 e = &qp->s_ack_queue[qpriv->r_tid_ack];
5257                 req = ack_to_tid_req(e);
5258         }
5259
5260         trace_hfi1_rsp_make_tid_ack(qp, e->psn);
5261         trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
5262                                         req);
5263         /*
5264          * If we've sent all the ACKs that we can, we are done
5265          * until we get more segments...
5266          */
5267         if (!qpriv->s_nak_state && !qpriv->resync &&
5268             req->ack_seg == req->comp_seg)
5269                 goto bail;
5270
5271         do {
5272                 /*
5273                  * To deal with coalesced ACKs, the acked_tail pointer
5274                  * into the flow array is used. The distance between it
5275                  * and the clear_tail is the number of flows that are
5276                  * being ACK'ed.
5277                  */
5278                 req->ack_seg +=
5279                         /* Get up-to-date value */
5280                         CIRC_CNT(req->clear_tail, req->acked_tail,
5281                                  MAX_FLOWS);
5282                 /* Advance acked index */
5283                 req->acked_tail = req->clear_tail;
5284
5285                 /*
5286                  * req->clear_tail points to the segment currently being
5287                  * received. So, when sending an ACK, the previous
5288                  * segment is being ACK'ed.
5289                  */
5290                 flow = CIRC_PREV(req->acked_tail, MAX_FLOWS);
5291                 if (req->ack_seg != req->total_segs)
5292                         break;
5293                 req->state = TID_REQUEST_COMPLETE;
5294
5295                 next = qpriv->r_tid_ack + 1;
5296                 if (next > rvt_size_atomic(&dev->rdi))
5297                         next = 0;
5298                 qpriv->r_tid_ack = next;
5299                 if (qp->s_ack_queue[next].opcode != TID_OP(WRITE_REQ))
5300                         break;
5301                 nreq = ack_to_tid_req(&qp->s_ack_queue[next]);
5302                 if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg)
5303                         break;
5304
5305                 /* Move to the next ack entry now */
5306                 e = &qp->s_ack_queue[qpriv->r_tid_ack];
5307                 req = ack_to_tid_req(e);
5308         } while (1);
5309
5310         /*
5311          * At this point qpriv->r_tid_ack == qpriv->r_tid_tail, but e and
5312          * req could be pointing at the previous ack queue entry.
5313          */
5314         if (qpriv->s_nak_state ||
5315             (qpriv->resync &&
5316              !hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1) &&
5317              (cmp_psn(qpriv->r_next_psn_kdeth - 1,
5318                       full_flow_psn(&req->flows[flow],
5319                                     req->flows[flow].flow_state.lpsn)) > 0))) {
5320                 /*
5321                  * A NAK will implicitly acknowledge all previous TID RDMA
5322                  * requests. Therefore, we NAK with the req->acked_tail
5323                  * segment for the request at qpriv->r_tid_ack (same at
5324                  * this point as the req->clear_tail segment for the
5325                  * qpriv->r_tid_tail request)
5326                  */
5327                 e = &qp->s_ack_queue[qpriv->r_tid_ack];
5328                 req = ack_to_tid_req(e);
5329                 flow = req->acked_tail;
5330         } else if (req->ack_seg == req->total_segs &&
5331                    qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK)
5332                 qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
5333
5334         trace_hfi1_tid_write_rsp_make_tid_ack(qp);
5335         trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
5336                                         req);
5337         hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1,
5338                                                 &bth2);
5339         len = 0;
5340         qpriv->s_flags &= ~RVT_S_ACK_PENDING;
5341         ps->s_txreq->hdr_dwords = hwords;
5342         ps->s_txreq->sde = qpriv->s_sde;
5343         ps->s_txreq->s_cur_size = len;
5344         ps->s_txreq->ss = NULL;
5345         hfi1_make_ruc_header(qp, ohdr, (TID_OP(ACK) << 24), bth1, bth2, middle,
5346                              ps);
5347         ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
5348         return 1;
5349 bail:
5350         /*
5351          * Ensure s_rdma_ack_cnt changes are committed prior to resetting
5352          * RVT_S_ACK_PENDING
5353          */
5354         smp_wmb();
5355         qpriv->s_flags &= ~RVT_S_ACK_PENDING;
5356         return 0;
5357 }
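
/*
 * Illustrative sketch, not part of the driver logic above: the
 * coalesced-ACK accounting used in the do-while loop. With MAX_FLOWS a
 * power of two, the number of segments folded into a single ACK is the
 * circular distance from acked_tail to clear_tail; e.g. with
 * MAX_FLOWS = 8, acked_tail = 6 and clear_tail = 1, CIRC_CNT() yields
 * (1 - 6) & 7 = 3 segments. The helper name tid_segs_to_ack() is
 * hypothetical.
 */
static inline u32 tid_segs_to_ack(struct tid_rdma_request *req)
{
	return CIRC_CNT(req->clear_tail, req->acked_tail, MAX_FLOWS);
}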
5358
5359 static int hfi1_send_tid_ok(struct rvt_qp *qp)
5360 {
5361         struct hfi1_qp_priv *priv = qp->priv;
5362
5363         return !(priv->s_flags & RVT_S_BUSY ||
5364                  qp->s_flags & HFI1_S_ANY_WAIT_IO) &&
5365                 (verbs_txreq_queued(iowait_get_tid_work(&priv->s_iowait)) ||
5366                  (priv->s_flags & RVT_S_RESP_PENDING) ||
5367                  !(qp->s_flags & HFI1_S_ANY_TID_WAIT_SEND));
5368 }
5369
5370 void _hfi1_do_tid_send(struct work_struct *work)
5371 {
5372         struct iowait_work *w = container_of(work, struct iowait_work, iowork);
5373         struct rvt_qp *qp = iowait_to_qp(w->iow);
5374
5375         hfi1_do_tid_send(qp);
5376 }
5377
5378 static void hfi1_do_tid_send(struct rvt_qp *qp)
5379 {
5380         struct hfi1_pkt_state ps;
5381         struct hfi1_qp_priv *priv = qp->priv;
5382
5383         ps.dev = to_idev(qp->ibqp.device);
5384         ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
5385         ps.ppd = ppd_from_ibp(ps.ibp);
5386         ps.wait = iowait_get_tid_work(&priv->s_iowait);
5387         ps.in_thread = false;
5388         ps.timeout_int = qp->timeout_jiffies / 8;
5389
5390         trace_hfi1_rc_do_tid_send(qp, false);
5391         spin_lock_irqsave(&qp->s_lock, ps.flags);
5392
5393         /* Return if we are already busy processing a work request. */
5394         if (!hfi1_send_tid_ok(qp)) {
5395                 if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
5396                         iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
5397                 spin_unlock_irqrestore(&qp->s_lock, ps.flags);
5398                 return;
5399         }
5400
5401         priv->s_flags |= RVT_S_BUSY;
5402
5403         ps.timeout = jiffies + ps.timeout_int;
5404         ps.cpu = priv->s_sde ? priv->s_sde->cpu :
5405                 cpumask_first(cpumask_of_node(ps.ppd->dd->node));
5406         ps.pkts_sent = false;
5407
5408         /* ensure a pre-built packet is handled */
5409         ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
5410         do {
5411                 /* Check for a constructed packet to be sent. */
5412                 if (ps.s_txreq) {
5413                         if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
5414                                 qp->s_flags |= RVT_S_BUSY;
5415                                 ps.wait = iowait_get_ib_work(&priv->s_iowait);
5416                         }
5417                         spin_unlock_irqrestore(&qp->s_lock, ps.flags);
5418
5419                         /*
5420                          * If the packet cannot be sent now, return and
5421                          * the send tasklet will be woken up later.
5422                          */
5423                         if (hfi1_verbs_send(qp, &ps))
5424                                 return;
5425
5426                         /* allow other tasks to run */
5427                         if (hfi1_schedule_send_yield(qp, &ps, true))
5428                                 return;
5429
5430                         spin_lock_irqsave(&qp->s_lock, ps.flags);
5431                         if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
5432                                 qp->s_flags &= ~RVT_S_BUSY;
5433                                 priv->s_flags &= ~HFI1_S_TID_BUSY_SET;
5434                                 ps.wait = iowait_get_tid_work(&priv->s_iowait);
5435                                 if (iowait_flag_set(&priv->s_iowait,
5436                                                     IOWAIT_PENDING_IB))
5437                                         hfi1_schedule_send(qp);
5438                         }
5439                 }
5440         } while (hfi1_make_tid_rdma_pkt(qp, &ps));
5441         iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
5442         spin_unlock_irqrestore(&qp->s_lock, ps.flags);
5443 }
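
/*
 * Illustrative sketch, not part of the driver logic above: the
 * RVT_S_BUSY hand-off in the send loop. While a pre-built packet
 * flagged HFI1_S_TID_BUSY_SET is being sent, the TID leg claims the IB
 * leg's busy bit so the two state machines cannot run concurrently;
 * once the send completes, the bit is dropped and the IB leg is
 * re-kicked if it has pending work. A condensed, hypothetical form of
 * the release step:
 */
static void tid_busy_handoff_done(struct rvt_qp *qp, struct hfi1_qp_priv *priv)
	__must_hold(&qp->s_lock)
{
	qp->s_flags &= ~RVT_S_BUSY;
	priv->s_flags &= ~HFI1_S_TID_BUSY_SET;
	if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB))
		hfi1_schedule_send(qp);
}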
5444
5445 static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
5446 {
5447         struct hfi1_qp_priv *priv = qp->priv;
5448         struct hfi1_ibport *ibp =
5449                 to_iport(qp->ibqp.device, qp->port_num);
5450         struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
5451         struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
5452
5453         return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
5454                                    priv->s_sde ?
5455                                    priv->s_sde->cpu :
5456                                    cpumask_first(cpumask_of_node(dd->node)));
5457 }
5458
5459 /**
5460  * hfi1_schedule_tid_send - schedule progress on TID RDMA state machine
5461  * @qp: the QP
5462  *
5463  * This schedules qp progress on the TID RDMA state machine. Caller
5464  * should hold the s_lock.
5465  * Unlike hfi1_schedule_send(), this cannot use hfi1_send_ok() because
5466  * the two state machines can step on each other with respect to the
5467  * RVT_S_BUSY flag.
5468  * Therefore, a modified test is used.
5469  * Return: true if the second leg is scheduled;
5470  * false if the second leg is not scheduled.
5471  */
5472 bool hfi1_schedule_tid_send(struct rvt_qp *qp)
5473 {
5474         lockdep_assert_held(&qp->s_lock);
5475         if (hfi1_send_tid_ok(qp)) {
5476                 /*
5477                  * The following call returns true if the qp is not on the
5478                  * queue and false if the qp is already on the queue before
5479                  * this call. Either way, the qp will be on the queue when the
5480                  * call returns.
5481                  */
5482                 _hfi1_schedule_tid_send(qp);
5483                 return true;
5484         }
5485         if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
5486                 iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
5487                                 IOWAIT_PENDING_TID);
5488         return false;
5489 }
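
/*
 * Illustrative usage sketch with a hypothetical caller: the scheduling
 * call above must be made with the QP's s_lock held, and a false return
 * only means the second leg was not queued now; if the QP is waiting on
 * I/O, IOWAIT_PENDING_TID ensures it is woken up later.
 */
static void example_kick_tid_leg(struct rvt_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	(void)hfi1_schedule_tid_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}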
5490
5491 bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e)
5492 {
5493         struct rvt_ack_entry *prev;
5494         struct tid_rdma_request *req;
5495         struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
5496         struct hfi1_qp_priv *priv = qp->priv;
5497         u32 s_prev;
5498
5499         s_prev = qp->s_tail_ack_queue == 0 ? rvt_size_atomic(&dev->rdi) :
5500                 (qp->s_tail_ack_queue - 1);
5501         prev = &qp->s_ack_queue[s_prev];
5502
5503         if ((e->opcode == TID_OP(READ_REQ) ||
5504              e->opcode == OP(RDMA_READ_REQUEST)) &&
5505             prev->opcode == TID_OP(WRITE_REQ)) {
5506                 req = ack_to_tid_req(prev);
5507                 if (req->ack_seg != req->total_segs) {
5508                         priv->s_flags |= HFI1_R_TID_WAIT_INTERLCK;
5509                         return true;
5510                 }
5511         }
5512         return false;
5513 }
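
/*
 * Illustrative sketch, not part of the driver logic above: the
 * interlock condition in isolation. A (TID) RDMA READ queued directly
 * behind a TID WRITE must wait until every segment of that WRITE has
 * been ACK'ed; e.g. with total_segs = 4 and ack_seg = 2 the READ stalls
 * with HFI1_R_TID_WAIT_INTERLCK set. The helper name is hypothetical.
 */
static inline bool tid_write_fully_acked(struct rvt_ack_entry *prev)
{
	struct tid_rdma_request *req = ack_to_tid_req(prev);

	return req->ack_seg == req->total_segs;
}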
5514
5515 static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx)
5516 {
5517         u64 reg;
5518
5519         /*
5520          * The only sane way to get the amount of
5521          * progress is to read the HW flow state.
5522          */
5523         reg = read_uctxt_csr(dd, ctxt, RCV_TID_FLOW_TABLE + (8 * fidx));
5524         return mask_psn(reg);
5525 }
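
/*
 * Illustrative usage sketch: each flow-table entry is a 64-bit CSR at
 * an 8-byte stride from RCV_TID_FLOW_TABLE, and mask_psn() keeps only
 * the PSN field of the flow state. A hypothetical caller refreshing the
 * software view of every flow in a context might do:
 */
static void example_refresh_flow_psns(struct hfi1_ctxtdata *rcd,
				      struct tid_rdma_flow *flows, u8 nflows)
{
	u8 fidx;

	for (fidx = 0; fidx < nflows; fidx++)
		flows[fidx].flow_state.r_next_psn =
			read_r_next_psn(rcd->dd, rcd->ctxt, fidx);
}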
5526
5527 static void tid_rdma_rcv_err(struct hfi1_packet *packet,
5528                              struct ib_other_headers *ohdr,
5529                              struct rvt_qp *qp, u32 psn, int diff, bool fecn)
5530 {
5531         unsigned long flags;
5532
5533         tid_rdma_rcv_error(packet, ohdr, qp, psn, diff);
5534         if (fecn) {
5535                 spin_lock_irqsave(&qp->s_lock, flags);
5536                 qp->s_flags |= RVT_S_ECN;
5537                 spin_unlock_irqrestore(&qp->s_lock, flags);
5538         }
5539 }
5540
5541 static void update_r_next_psn_fecn(struct hfi1_packet *packet,
5542                                    struct hfi1_qp_priv *priv,
5543                                    struct hfi1_ctxtdata *rcd,
5544                                    struct tid_rdma_flow *flow,
5545                                    bool fecn)
5546 {
5547         /*
5548          * If a start/middle packet is delivered here due to
5549          * RSM rule and FECN, we need to update the r_next_psn.
5550          */
5551         if (fecn && packet->etype == RHF_RCV_TYPE_EAGER &&
5552             !(priv->s_flags & HFI1_R_TID_SW_PSN)) {
5553                 struct hfi1_devdata *dd = rcd->dd;
5554
5555                 flow->flow_state.r_next_psn =
5556                         read_r_next_psn(dd, rcd->ctxt, flow->idx);
5557         }
5558 }