// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2018 Intel Corporation.
 */
/**
 * DOC: TID RDMA READ protocol
 *
 * This is an end-to-end protocol at the hfi1 level between two nodes that
 * improves performance by avoiding data copy on the requester side. It
 * converts a qualified RDMA READ request into a TID RDMA READ request on
 * the requester side and thereafter handles the request and response
 * differently. To be qualified, the RDMA READ request should meet the
 * following conditions:
 * -- The total data length should be greater than 256K;
 * -- The total data length should be a multiple of 4K page size;
 * -- Each local scatter-gather entry should be 4K page aligned;
 * -- Each local scatter-gather entry should be a multiple of 4K page size.
 */
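/*
 * Illustrative sketch only (not driver code): the qualification rules
 * above could be expressed as a helper like the hypothetical
 * example_tid_rdma_read_qualifies() below, which assumes a total byte
 * count and an array of local SGEs. The driver performs the real checks
 * in its own helpers (e.g. hfi1_check_sge_align, referenced later in
 * this file).
 */
#if 0	/* example only, never compiled */
static bool example_tid_rdma_read_qualifies(u64 total_len,
					    const struct rvt_sge *sgl,
					    u8 num_sge)
{
	u8 i;

	/* total length must exceed 256K and be 4K-page aligned */
	if (total_len <= SZ_256K || !IS_ALIGNED(total_len, SZ_4K))
		return false;
	/* every local SGE must start and end on a 4K page boundary */
	for (i = 0; i < num_sge; i++) {
		if (!IS_ALIGNED((uintptr_t)sgl[i].vaddr, SZ_4K) ||
		    !IS_ALIGNED(sgl[i].sge_length, SZ_4K))
			return false;
	}
	return true;
}
#endif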
#define RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK BIT_ULL(32)
#define RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK BIT_ULL(33)
#define RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK BIT_ULL(34)
#define RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK BIT_ULL(35)
#define RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK BIT_ULL(37)
#define RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK BIT_ULL(38)

/* Maximum number of packets within a flow generation. */
#define MAX_TID_FLOW_PSN BIT(HFI1_KDETH_BTH_SEQ_SHIFT)

#define GENERATION_MASK 0xFFFFF

static u32 mask_generation(u32 a)
{
	return a & GENERATION_MASK;
}
/* Reserved generation value to set to unused flows for kernel contexts */
#define KERN_GENERATION_RESERVED mask_generation(U32_MAX)
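/*
 * Illustrative note (not in the original source): a KDETH flow PSN packs
 * a generation into the upper bits and an HFI1_KDETH_BTH_SEQ_SHIFT-bit
 * sequence number into the low bits:
 *
 *	flow_psn = (generation << HFI1_KDETH_BTH_SEQ_SHIFT) | seq;
 *
 * mask_generation(U32_MAX) is the all-ones 20-bit value (0xFFFFF), so
 * KERN_GENERATION_RESERVED is simply the largest generation, which
 * kern_flow_generation_next() below skips whenever a flow's generation
 * wraps around to it.
 */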
/*
 * J_KEY for kernel contexts when TID RDMA is used.
 * See generate_jkey() in hfi.h for more information.
 */
#define TID_RDMA_JKEY 32
#define HFI1_KERNEL_MIN_JKEY HFI1_ADMIN_JKEY_RANGE
#define HFI1_KERNEL_MAX_JKEY (2 * HFI1_ADMIN_JKEY_RANGE - 1)

/* Maximum number of segments in flight per QP request. */
#define TID_RDMA_MAX_READ_SEGS_PER_REQ 6
#define TID_RDMA_MAX_WRITE_SEGS_PER_REQ 4
#define MAX_REQ max_t(u16, TID_RDMA_MAX_READ_SEGS_PER_REQ, \
			TID_RDMA_MAX_WRITE_SEGS_PER_REQ)
#define MAX_FLOWS roundup_pow_of_two(MAX_REQ + 1)

#define MAX_EXPECTED_PAGES (MAX_EXPECTED_BUFFER / PAGE_SIZE)

#define TID_RDMA_DESTQP_FLOW_SHIFT 11
#define TID_RDMA_DESTQP_FLOW_MASK 0x1f

#define TID_OPFN_QP_CTXT_MASK 0xff
#define TID_OPFN_QP_CTXT_SHIFT 56
#define TID_OPFN_QP_KDETH_MASK 0xff
#define TID_OPFN_QP_KDETH_SHIFT 48
#define TID_OPFN_MAX_LEN_MASK 0x7ff
#define TID_OPFN_MAX_LEN_SHIFT 37
#define TID_OPFN_TIMEOUT_MASK 0x1f
#define TID_OPFN_TIMEOUT_SHIFT 32
#define TID_OPFN_RESERVED_MASK 0x3f
#define TID_OPFN_RESERVED_SHIFT 26
#define TID_OPFN_URG_MASK 0x1
#define TID_OPFN_URG_SHIFT 25
#define TID_OPFN_VER_MASK 0x7
#define TID_OPFN_VER_SHIFT 22
#define TID_OPFN_JKEY_MASK 0x3f
#define TID_OPFN_JKEY_SHIFT 16
#define TID_OPFN_MAX_READ_MASK 0x3f
#define TID_OPFN_MAX_READ_SHIFT 10
#define TID_OPFN_MAX_WRITE_MASK 0x3f
#define TID_OPFN_MAX_WRITE_SHIFT 4

/*
 * OPFN TID layout (field letters match the defines above):
 *
 * 63               47               31               15
 * NNNNNNNNKKKKKKKK MMMMMMMMMMMTTTTT DDDDDDUVVVJJJJJJ RRRRRRWWWWWWCCCC
 * 3210987654321098 7654321098765432 1098765432109876 5432109876543210
 * N - the context Number
 * K - the Kdeth_qp
 * M - Max_len
 * T - Timeout
 * D - reserveD
 * U - Urg capable
 * V - Version
 * J - Jkey
 * R - max_Read
 * W - max_Write
 * C - Capcode
 */
static u32 tid_rdma_flow_wt;

static void tid_rdma_trigger_resume(struct work_struct *work);
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
					 gfp_t gfp);
static void hfi1_init_trdma_req(struct rvt_qp *qp,
				struct tid_rdma_request *req);
static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx);
static void hfi1_tid_timeout(struct timer_list *t);
static void hfi1_add_tid_reap_timer(struct rvt_qp *qp);
static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp);
static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp);
static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp);
static void hfi1_tid_retry_timeout(struct timer_list *t);
static int make_tid_rdma_ack(struct rvt_qp *qp,
			     struct ib_other_headers *ohdr,
			     struct hfi1_pkt_state *ps);
static void hfi1_do_tid_send(struct rvt_qp *qp);
static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx);
static void tid_rdma_rcv_err(struct hfi1_packet *packet,
			     struct ib_other_headers *ohdr,
			     struct rvt_qp *qp, u32 psn, int diff, bool fecn);
static void update_r_next_psn_fecn(struct hfi1_packet *packet,
				   struct hfi1_qp_priv *priv,
				   struct hfi1_ctxtdata *rcd,
				   struct tid_rdma_flow *flow,
				   bool fecn);
static u64 tid_rdma_opfn_encode(struct tid_rdma_params *p)
{
	return
		(((u64)p->qp & TID_OPFN_QP_CTXT_MASK) <<
			TID_OPFN_QP_CTXT_SHIFT) |
		((((u64)p->qp >> 16) & TID_OPFN_QP_KDETH_MASK) <<
			TID_OPFN_QP_KDETH_SHIFT) |
		(((u64)((p->max_len >> PAGE_SHIFT) - 1) &
			TID_OPFN_MAX_LEN_MASK) << TID_OPFN_MAX_LEN_SHIFT) |
		(((u64)p->timeout & TID_OPFN_TIMEOUT_MASK) <<
			TID_OPFN_TIMEOUT_SHIFT) |
		(((u64)p->urg & TID_OPFN_URG_MASK) << TID_OPFN_URG_SHIFT) |
		(((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT) |
		(((u64)p->max_read & TID_OPFN_MAX_READ_MASK) <<
			TID_OPFN_MAX_READ_SHIFT) |
		(((u64)p->max_write & TID_OPFN_MAX_WRITE_MASK) <<
			TID_OPFN_MAX_WRITE_SHIFT);
}

static void tid_rdma_opfn_decode(struct tid_rdma_params *p, u64 data)
{
	p->max_len = (((data >> TID_OPFN_MAX_LEN_SHIFT) &
		      TID_OPFN_MAX_LEN_MASK) + 1) << PAGE_SHIFT;
	p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK;
	p->max_write = (data >> TID_OPFN_MAX_WRITE_SHIFT) &
		       TID_OPFN_MAX_WRITE_MASK;
	p->max_read = (data >> TID_OPFN_MAX_READ_SHIFT) &
		      TID_OPFN_MAX_READ_MASK;
	p->qp =
		((((data >> TID_OPFN_QP_KDETH_SHIFT) & TID_OPFN_QP_KDETH_MASK)
			<< 16) |
		 ((data >> TID_OPFN_QP_CTXT_SHIFT) & TID_OPFN_QP_CTXT_MASK));
	p->urg = (data >> TID_OPFN_URG_SHIFT) & TID_OPFN_URG_MASK;
	p->timeout = (data >> TID_OPFN_TIMEOUT_SHIFT) & TID_OPFN_TIMEOUT_MASK;
}
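/*
 * Illustrative note (not in the original source): encode/decode above
 * are a straight pack/unpack of the 64-bit OPFN word shown in the
 * layout comment. For a single field, e.g. jkey:
 *
 *	data |= ((u64)p->jkey & TID_OPFN_JKEY_MASK) << TID_OPFN_JKEY_SHIFT;
 *	p->jkey = (data >> TID_OPFN_JKEY_SHIFT) & TID_OPFN_JKEY_MASK;
 *
 * The one asymmetric field is max_len: it is stored biased by one page,
 * (max_len >> PAGE_SHIFT) - 1, so the 11-bit field can describe 1 to
 * 2048 pages, and the decode side adds the page back before shifting.
 */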
void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
{
	struct hfi1_qp_priv *priv = qp->priv;

	p->qp = (kdeth_qp << 16) | priv->rcd->ctxt;
	p->max_len = TID_RDMA_MAX_SEGMENT_SIZE;
	p->jkey = priv->rcd->jkey;
	p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ;
	p->max_write = TID_RDMA_MAX_WRITE_SEGS_PER_REQ;
	p->timeout = qp->timeout;
	p->urg = is_urg_masked(priv->rcd);
}

bool tid_rdma_conn_req(struct rvt_qp *qp, u64 *data)
{
	struct hfi1_qp_priv *priv = qp->priv;

	*data = tid_rdma_opfn_encode(&priv->tid_rdma.local);
	return true;
}

bool tid_rdma_conn_reply(struct rvt_qp *qp, u64 data)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct tid_rdma_params *remote, *old;
	bool ret = true;

	old = rcu_dereference_protected(priv->tid_rdma.remote,
					lockdep_is_held(&priv->opfn.lock));
	/*
	 * If data passed in is zero, return true so as not to continue the
	 * negotiation process
	 */
	if (!data || !HFI1_CAP_IS_KSET(TID_RDMA))
		goto null;
	/*
	 * If kzalloc fails, return false. This will result in:
	 * * at the requester a new OPFN request being generated to retry
	 *   the negotiation
	 * * at the responder, 0 being returned to the requester so as to
	 *   disable TID RDMA at both the requester and the responder
	 */
	remote = kzalloc(sizeof(*remote), GFP_ATOMIC);
	if (!remote) {
		ret = false;
		goto null;
	}

	tid_rdma_opfn_decode(remote, data);
	priv->tid_timer_timeout_jiffies =
		usecs_to_jiffies((((4096UL * (1UL << remote->timeout)) /
				   1000UL) << 3) * 7);
	trace_hfi1_opfn_param(qp, 0, &priv->tid_rdma.local);
	trace_hfi1_opfn_param(qp, 1, remote);
	rcu_assign_pointer(priv->tid_rdma.remote, remote);
	/*
	 * A TID RDMA READ request's segment size is not equal to
	 * remote->max_len only when the request's data length is smaller
	 * than remote->max_len. In that case, there will be only one segment.
	 * Therefore, when priv->pkts_ps is used to calculate req->cur_seg
	 * during retry, it will lead to req->cur_seg = 0, which is exactly
	 * what is expected.
	 */
	priv->pkts_ps = (u16)rvt_div_mtu(qp, remote->max_len);
	priv->timeout_shift = ilog2(priv->pkts_ps - 1) + 1;
	goto free;
null:
	RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
	priv->timeout_shift = 0;
free:
	if (old)
		kfree_rcu(old, rcu_head);
	return ret;
}
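/*
 * Worked example (illustrative, not in the original source): with a
 * 4096-byte path MTU and a negotiated remote->max_len of 256 KiB,
 * rvt_div_mtu() yields pkts_ps = 262144 / 4096 = 64 packets per
 * segment, and timeout_shift = ilog2(64 - 1) + 1 = 6, the smallest
 * shift that covers the per-segment packet budget.
 */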
bool tid_rdma_conn_resp(struct rvt_qp *qp, u64 *data)
{
	bool ret;

	ret = tid_rdma_conn_reply(qp, *data);
	*data = 0;
	/*
	 * If tid_rdma_conn_reply() returns error, set *data as 0 to indicate
	 * TID RDMA could not be enabled. This will result in TID RDMA being
	 * disabled at the requester too.
	 */
	if (ret)
		(void)tid_rdma_conn_req(qp, data);
	return ret;
}

void tid_rdma_conn_error(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct tid_rdma_params *old;

	old = rcu_dereference_protected(priv->tid_rdma.remote,
					lockdep_is_held(&priv->opfn.lock));
	RCU_INIT_POINTER(priv->tid_rdma.remote, NULL);
	if (old)
		kfree_rcu(old, rcu_head);
}

/* This is called at context initialization time */
int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
{
	if (reinit)
		return 0;

	BUILD_BUG_ON(TID_RDMA_JKEY < HFI1_KERNEL_MIN_JKEY);
	BUILD_BUG_ON(TID_RDMA_JKEY > HFI1_KERNEL_MAX_JKEY);
	rcd->jkey = TID_RDMA_JKEY;
	hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
	return hfi1_alloc_ctxt_rcv_groups(rcd);
}

/**
 * qp_to_rcd - determine the receive context used by a qp
 * @rdi: rvt dev struct
 * @qp: the qp
 *
 * This routine returns the receive context associated
 * with a given qp's qp number.
 *
 * Returns the context.
 */
static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
				       struct rvt_qp *qp)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	unsigned int ctxt;

	if (qp->ibqp.qp_num == 0)
		ctxt = 0;
	else
		ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
	return dd->rcd[ctxt];
}
int hfi1_qp_priv_init(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		      struct ib_qp_init_attr *init_attr)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	int i, ret;

	qpriv->rcd = qp_to_rcd(rdi, qp);

	spin_lock_init(&qpriv->opfn.lock);
	INIT_WORK(&qpriv->opfn.opfn_work, opfn_send_conn_request);
	INIT_WORK(&qpriv->tid_rdma.trigger_work, tid_rdma_trigger_resume);
	qpriv->flow_state.psn = 0;
	qpriv->flow_state.index = RXE_NUM_TID_FLOWS;
	qpriv->flow_state.last_index = RXE_NUM_TID_FLOWS;
	qpriv->flow_state.generation = KERN_GENERATION_RESERVED;
	qpriv->s_state = TID_OP(WRITE_RESP);
	qpriv->s_tid_cur = HFI1_QP_WQE_INVALID;
	qpriv->s_tid_head = HFI1_QP_WQE_INVALID;
	qpriv->s_tid_tail = HFI1_QP_WQE_INVALID;
	qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
	qpriv->r_tid_head = HFI1_QP_WQE_INVALID;
	qpriv->r_tid_tail = HFI1_QP_WQE_INVALID;
	qpriv->r_tid_ack = HFI1_QP_WQE_INVALID;
	qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID;
	atomic_set(&qpriv->n_requests, 0);
	atomic_set(&qpriv->n_tid_requests, 0);
	timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0);
	timer_setup(&qpriv->s_tid_retry_timer, hfi1_tid_retry_timeout, 0);
	INIT_LIST_HEAD(&qpriv->tid_wait);

	if (init_attr->qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
		struct hfi1_devdata *dd = qpriv->rcd->dd;

		qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES *
						sizeof(*qpriv->pages),
					    GFP_KERNEL, dd->node);
		if (!qpriv->pages)
			return -ENOMEM;
		for (i = 0; i < qp->s_size; i++) {
			struct hfi1_swqe_priv *priv;
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);

			priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
					    dd->node);
			if (!priv)
				return -ENOMEM;

			hfi1_init_trdma_req(qp, &priv->tid_req);
			priv->tid_req.e.swqe = wqe;
			wqe->priv = priv;
		}
		for (i = 0; i < rvt_max_atomic(rdi); i++) {
			struct hfi1_ack_priv *priv;

			priv = kzalloc_node(sizeof(*priv), GFP_KERNEL,
					    dd->node);
			if (!priv)
				return -ENOMEM;

			hfi1_init_trdma_req(qp, &priv->tid_req);
			priv->tid_req.e.ack = &qp->s_ack_queue[i];

			ret = hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req,
							    GFP_KERNEL);
			if (ret) {
				kfree(priv);
				return ret;
			}
			qp->s_ack_queue[i].priv = priv;
		}
	}

	return 0;
}

void hfi1_qp_priv_tid_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct rvt_swqe *wqe;
	u32 i;

	if (qp->ibqp.qp_type == IB_QPT_RC && HFI1_CAP_IS_KSET(TID_RDMA)) {
		for (i = 0; i < qp->s_size; i++) {
			wqe = rvt_get_swqe_ptr(qp, i);
			kfree(wqe->priv);
			wqe->priv = NULL;
		}
		for (i = 0; i < rvt_max_atomic(rdi); i++) {
			struct hfi1_ack_priv *priv = qp->s_ack_queue[i].priv;

			if (priv)
				hfi1_kern_exp_rcv_free_flows(&priv->tid_req);
			kfree(priv);
			qp->s_ack_queue[i].priv = NULL;
		}
		cancel_work_sync(&qpriv->opfn.opfn_work);
		kfree(qpriv->pages);
		qpriv->pages = NULL;
	}
}
/* Flow and tid waiter functions */
/**
 * DOC: lock ordering
 *
 * There are two locks involved with the queuing
 * routines: the qp s_lock and the exp_lock.
 *
 * Since the tid space allocation is called from
 * the send engine, the qp s_lock is already held.
 *
 * The allocation routines will get the exp_lock.
 *
 * The first_qp() call is provided to allow the head of
 * the rcd wait queue to be fetched under the exp_lock and
 * followed by a drop of the exp_lock.
 *
 * Any qp in the wait list will have the qp reference count held
 * to hold the qp in memory.
 */
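/*
 * Illustrative sketch of the ordering described above (example only,
 * not driver code; allocate_tid_resources() is a hypothetical stand-in
 * for the allocation step):
 */
#if 0	/* example only, never compiled */
	/* caller already holds qp->s_lock (send engine) */
	spin_lock(&rcd->exp_lock);
	if (allocate_tid_resources(rcd, qp))
		queue_qp_for_tid_wait(rcd, queue, qp);
	else
		dequeue_tid_waiter(rcd, queue, qp);
	fqp = first_qp(rcd, queue);	/* takes a qp reference */
	spin_unlock(&rcd->exp_lock);
	tid_rdma_schedule_tid_wakeup(fqp);	/* disposes of the reference */
#endif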
/*
 * Return the head of the rcd wait list
 *
 * Must hold the exp_lock.
 *
 * Get a reference to the QP to hold the QP in memory.
 *
 * The caller must release the reference when the returned qp
 * is no longer being used.
 */
static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue)
	__must_hold(&rcd->exp_lock)
{
	struct hfi1_qp_priv *priv;

	lockdep_assert_held(&rcd->exp_lock);
	priv = list_first_entry_or_null(&queue->queue_head,
					struct hfi1_qp_priv,
					tid_wait);
	if (!priv)
		return NULL;
	rvt_get_qp(priv->owner);
	return priv->owner;
}

/**
 * kernel_tid_waiters - determine rcd wait
 * @rcd: the receive context
 * @queue: the queue to operate on
 * @qp: the head of the qp being processed
 *
 * This routine will return false IFF
 * the list is empty or the head of the
 * list is the indicated qp.
 *
 * Must hold the qp s_lock and the exp_lock.
 *
 * Return:
 * false if either of the conditions below are satisfied:
 * 1. The list is empty or
 * 2. The indicated qp is at the head of the list and the
 *    HFI1_S_WAIT_TID_SPACE bit is set in qp->s_flags.
 * true is returned otherwise.
 */
static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{
	struct rvt_qp *fqp;
	bool ret = true;

	lockdep_assert_held(&qp->s_lock);
	lockdep_assert_held(&rcd->exp_lock);
	fqp = first_qp(rcd, queue);
	if (!fqp || (fqp == qp && (qp->s_flags & HFI1_S_WAIT_TID_SPACE)))
		ret = false;
	rvt_put_qp(fqp);
	return ret;
}

/**
 * dequeue_tid_waiter - dequeue the qp from the list
 * @rcd: the receive context
 * @queue: the queue to operate on
 * @qp: the qp to remove from the wait list
 *
 * This routine removes the indicated qp from the
 * wait list if it is there.
 *
 * This should be done after the hardware flow and
 * tid array resources have been allocated.
 *
 * Must hold the qp s_lock and the rcd exp_lock.
 *
 * It assumes the s_lock to protect the s_flags
 * field and to reliably test the HFI1_S_WAIT_TID_SPACE flag.
 */
static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
			       struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	lockdep_assert_held(&rcd->exp_lock);
	if (list_empty(&priv->tid_wait))
		return;
	list_del_init(&priv->tid_wait);
	qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
	queue->dequeue++;
	rvt_put_qp(qp);
}
/**
 * queue_qp_for_tid_wait - suspend QP on tid space
 * @rcd: the receive context
 * @queue: the queue to operate on
 * @qp: the qp
 *
 * The qp is inserted at the tail of the rcd
 * wait queue and the HFI1_S_WAIT_TID_SPACE s_flag is set.
 *
 * Must hold the qp s_lock and the exp_lock.
 */
static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd,
				  struct tid_queue *queue, struct rvt_qp *qp)
	__must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	lockdep_assert_held(&rcd->exp_lock);
	if (list_empty(&priv->tid_wait)) {
		qp->s_flags |= HFI1_S_WAIT_TID_SPACE;
		list_add_tail(&priv->tid_wait, &queue->queue_head);
		priv->tid_enqueue = ++queue->enqueue;
		rcd->dd->verbs_dev.n_tidwait++;
		trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TID_SPACE);
		rvt_get_qp(qp);
	}
}

/**
 * __trigger_tid_waiter - trigger tid waiter
 * @qp: the qp
 *
 * This is a private entrance to schedule the qp
 * assuming the caller is holding the qp->s_lock.
 */
static void __trigger_tid_waiter(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	lockdep_assert_held(&qp->s_lock);
	if (!(qp->s_flags & HFI1_S_WAIT_TID_SPACE))
		return;
	trace_hfi1_qpwakeup(qp, HFI1_S_WAIT_TID_SPACE);
	hfi1_schedule_send(qp);
}

/**
 * tid_rdma_schedule_tid_wakeup - schedule wakeup for a qp
 * @qp: the qp
 *
 * Trigger a schedule of a waiting qp in a deadlock
 * safe manner. The qp reference is held prior
 * to this call via first_qp().
 *
 * If the qp trigger was already scheduled (!rval)
 * then the reference is dropped, otherwise the resume
 * or the destroy cancel will dispatch the reference.
 */
static void tid_rdma_schedule_tid_wakeup(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv;
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	bool rval;

	if (!qp)
		return;

	priv = qp->priv;
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	dd = dd_from_ibdev(qp->ibqp.device);

	rval = queue_work_on(priv->s_sde ?
			     priv->s_sde->cpu :
			     cpumask_first(cpumask_of_node(dd->node)),
			     ppd->hfi1_wq,
			     &priv->tid_rdma.trigger_work);
	if (!rval)
		rvt_put_qp(qp);
}
/**
 * tid_rdma_trigger_resume - field a trigger work request
 * @work: the work item
 *
 * Complete the off qp trigger processing by directly
 * calling the progress routine.
 */
static void tid_rdma_trigger_resume(struct work_struct *work)
{
	struct tid_rdma_qp_params *tr;
	struct hfi1_qp_priv *priv;
	struct rvt_qp *qp;

	tr = container_of(work, struct tid_rdma_qp_params, trigger_work);
	priv = container_of(tr, struct hfi1_qp_priv, tid_rdma);
	qp = priv->owner;
	spin_lock_irq(&qp->s_lock);
	if (qp->s_flags & HFI1_S_WAIT_TID_SPACE) {
		spin_unlock_irq(&qp->s_lock);
		hfi1_do_send(priv->owner, true);
	} else {
		spin_unlock_irq(&qp->s_lock);
	}
	rvt_put_qp(qp);
}

/*
 * tid_rdma_flush_wait - unwind any tid space wait
 *
 * This is called when resetting a qp to
 * allow a destroy or reset to get rid
 * of any tid space linkage and reference counts.
 */
static void _tid_rdma_flush_wait(struct rvt_qp *qp, struct tid_queue *queue)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv;

	if (!qp)
		return;
	lockdep_assert_held(&qp->s_lock);
	priv = qp->priv;
	qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
	spin_lock(&priv->rcd->exp_lock);
	if (!list_empty(&priv->tid_wait)) {
		list_del_init(&priv->tid_wait);
		qp->s_flags &= ~HFI1_S_WAIT_TID_SPACE;
		queue->dequeue++;
		rvt_put_qp(qp);
	}
	spin_unlock(&priv->rcd->exp_lock);
}

void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;

	_tid_rdma_flush_wait(qp, &priv->rcd->flow_queue);
	_tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue);
}
/**
 * kern_reserve_flow - allocate a hardware flow
 * @rcd: the context to use for allocation
 * @last: the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
 *        signify "don't care".
 *
 * Use a bit mask based allocation to reserve a hardware
 * flow for use in receiving KDETH data packets. If a preferred flow is
 * specified the function will attempt to reserve that flow again, if
 * available.
 *
 * The exp_lock must be held.
 *
 * Return:
 * On success: a value between 0 and RXE_NUM_TID_FLOWS - 1
 * On failure: -EAGAIN
 */
static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
	__must_hold(&rcd->exp_lock)
{
	int nr;

	/* Attempt to reserve the preferred flow index */
	if (last >= 0 && last < RXE_NUM_TID_FLOWS &&
	    !test_and_set_bit(last, &rcd->flow_mask))
		return last;

	nr = ffz(rcd->flow_mask);
	BUILD_BUG_ON(RXE_NUM_TID_FLOWS >=
		     (sizeof(rcd->flow_mask) * BITS_PER_BYTE));
	if (nr > (RXE_NUM_TID_FLOWS - 1))
		return -EAGAIN;
	set_bit(nr, &rcd->flow_mask);
	return nr;
}
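/*
 * Illustrative example (not in the original source): assuming
 * RXE_NUM_TID_FLOWS is 32 and flow_mask is ...0111b, ffz() returns 3,
 * so flow index 3 is handed out and bit 3 is set. If a still-free
 * preferred index was passed in, test_and_set_bit() claims it
 * atomically and the ffz() search is skipped entirely.
 */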
static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation,
			     u32 flow_idx)
{
	u64 reg;

	reg = ((u64)generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
		RCV_TID_FLOW_TABLE_CTRL_FLOW_VALID_SMASK |
		RCV_TID_FLOW_TABLE_CTRL_KEEP_AFTER_SEQ_ERR_SMASK |
		RCV_TID_FLOW_TABLE_CTRL_KEEP_ON_GEN_ERR_SMASK |
		RCV_TID_FLOW_TABLE_STATUS_SEQ_MISMATCH_SMASK |
		RCV_TID_FLOW_TABLE_STATUS_GEN_MISMATCH_SMASK;

	if (generation != KERN_GENERATION_RESERVED)
		reg |= RCV_TID_FLOW_TABLE_CTRL_HDR_SUPP_EN_SMASK;

	write_uctxt_csr(rcd->dd, rcd->ctxt,
			RCV_TID_FLOW_TABLE + 8 * flow_idx, reg);
}

static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
	__must_hold(&rcd->exp_lock)
{
	u32 generation = rcd->flows[flow_idx].generation;

	kern_set_hw_flow(rcd, generation, flow_idx);
	return generation;
}

static u32 kern_flow_generation_next(u32 gen)
{
	u32 generation = mask_generation(gen + 1);

	if (generation == KERN_GENERATION_RESERVED)
		generation = mask_generation(generation + 1);
	return generation;
}

static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
	__must_hold(&rcd->exp_lock)
{
	rcd->flows[flow_idx].generation =
		kern_flow_generation_next(rcd->flows[flow_idx].generation);
	kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx);
}
int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
	struct tid_flow_state *fs = &qpriv->flow_state;
	struct rvt_qp *fqp;
	unsigned long flags;
	int ret = 0;

	/* The QP already has an allocated flow */
	if (fs->index != RXE_NUM_TID_FLOWS)
		return ret;

	spin_lock_irqsave(&rcd->exp_lock, flags);
	if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp))
		goto queue;

	ret = kern_reserve_flow(rcd, fs->last_index);
	if (ret < 0)
		goto queue;
	fs->index = ret;
	fs->last_index = fs->index;

	/* Generation received in a RESYNC overrides default flow generation */
	if (fs->generation != KERN_GENERATION_RESERVED)
		rcd->flows[fs->index].generation = fs->generation;
	fs->generation = kern_setup_hw_flow(rcd, fs->index);
	fs->psn = 0;
	dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
	/* get head before dropping lock */
	fqp = first_qp(rcd, &rcd->flow_queue);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);

	tid_rdma_schedule_tid_wakeup(fqp);
	return 0;
queue:
	queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);
	return -EAGAIN;
}

void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
	struct tid_flow_state *fs = &qpriv->flow_state;
	struct rvt_qp *fqp;
	unsigned long flags;

	if (fs->index >= RXE_NUM_TID_FLOWS)
		return;
	spin_lock_irqsave(&rcd->exp_lock, flags);
	kern_clear_hw_flow(rcd, fs->index);
	clear_bit(fs->index, &rcd->flow_mask);
	fs->index = RXE_NUM_TID_FLOWS;
	fs->psn = 0;
	fs->generation = KERN_GENERATION_RESERVED;

	/* get head before dropping lock */
	fqp = first_qp(rcd, &rcd->flow_queue);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);

	if (fqp == qp) {
		__trigger_tid_waiter(fqp);
		rvt_put_qp(fqp);
	} else {
		tid_rdma_schedule_tid_wakeup(fqp);
	}
}

void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
{
	int i;

	for (i = 0; i < RXE_NUM_TID_FLOWS; i++) {
		rcd->flows[i].generation = mask_generation(prandom_u32());
		kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
	}
}
/* TID allocation functions */
static u8 trdma_pset_order(struct tid_rdma_pageset *s)
{
	u8 count = s->count;

	return ilog2(count) + 1;
}
/**
 * tid_rdma_find_phys_blocks_4k - get groups based on mr info
 * @flow: overall info for a TID RDMA segment
 * @pages: pointer to an array of page structs
 * @npages: number of pages
 * @list: page set array to return
 *
 * This routine returns the number of groups associated with
 * the current sge information. This implementation is based
 * on the expected receive find_phys_blocks() adjusted to
 * use the MR information vs. the pfn.
 *
 * Return:
 * the number of RcvArray entries
 */
static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
					struct page **pages,
					u32 npages,
					struct tid_rdma_pageset *list)
{
	u32 pagecount, pageidx, setcount = 0, i;
	void *vaddr, *this_vaddr;

	if (!npages)
		return 0;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	vaddr = page_address(pages[0]);
	trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_vaddr = i < npages ? page_address(pages[i]) : NULL;
		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
					 this_vaddr);
		/*
		 * If the vaddr's are not sequential, pages are not physically
		 * contiguous.
		 */
		if (this_vaddr != (vaddr + PAGE_SIZE)) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down into
			 * sizes supported by the HW.
			 * There are two main constraints:
			 *     1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *        If the total set size is bigger than that
			 *        program only a MAX_EXPECTED_BUFFER chunk.
			 *     2. The buffer size has to be a power of two. If
			 *        it is not, round down to the closest power of
			 *        2 and program that size.
			 */
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				trace_hfi1_tid_pageset(flow->req->qp, setcount,
						       list[setcount].idx,
						       list[setcount].count);
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			vaddr = this_vaddr;
		} else {
			vaddr += PAGE_SIZE;
			pagecount++;
		}
	}
	/* ensure we always return an even number of sets */
	if (setcount & 1)
		list[setcount++].count = 0;
	return setcount;
}
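/*
 * Worked example (illustrative, not in the original source): a run of
 * 13 physically contiguous 4K pages (52 KiB) that fits under
 * MAX_EXPECTED_BUFFER is not a power of two, so the loop above emits
 * rounddown_pow_of_two() chunks of 8, then 4, then 1 page. If the
 * final set count for the whole buffer comes out odd, a zero-length
 * filler set is appended to keep it even.
 */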
/**
 * tid_flush_pages - dump out pages into pagesets
 * @list: list of pagesets
 * @idx: pointer to current page index
 * @pages: number of pages to dump
 * @sets: current number of pagesets
 *
 * This routine flushes out accumulated pages.
 *
 * To ensure an even number of sets the
 * code may add a filler.
 *
 * This can happen when pages is not
 * a power of 2 or pages is a power of 2
 * less than the maximum pages.
 *
 * Return:
 * The new number of sets
 */
static u32 tid_flush_pages(struct tid_rdma_pageset *list,
			   u32 *idx, u32 pages, u32 sets)
{
	while (pages) {
		u32 maxpages = pages;

		if (maxpages > MAX_EXPECTED_PAGES)
			maxpages = MAX_EXPECTED_PAGES;
		else if (!is_power_of_2(maxpages))
			maxpages = rounddown_pow_of_two(maxpages);
		list[sets].idx = *idx;
		list[sets++].count = maxpages;
		*idx += maxpages;
		pages -= maxpages;
	}
	/* might need a filler */
	if (sets & 1)
		list[sets++].count = 0;
	return sets;
}
/**
 * tid_rdma_find_phys_blocks_8k - get groups based on mr info
 * @flow: overall info for a TID RDMA segment
 * @pages: pointer to an array of page structs
 * @npages: number of pages
 * @list: page set array to return
 *
 * This routine parses an array of pages to compute pagesets
 * in an 8k compatible way.
 *
 * pages are tested two at a time, i, i + 1 for contiguous
 * pages and i - 1 and i contiguous pages.
 *
 * If any condition is false, any accumulated pages are flushed and
 * v0,v1 are emitted as separate PAGE_SIZE pagesets
 *
 * Otherwise, the current 8k is totaled for a future flush.
 *
 * Return:
 * The number of pagesets
 * list set with the returned number of pagesets
 */
static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
					struct page **pages,
					u32 npages,
					struct tid_rdma_pageset *list)
{
	u32 idx, sets = 0, i;
	u32 pagecnt = 0;
	void *v0, *v1, *vm1;

	if (!npages)
		return 0;
	for (idx = 0, i = 0, vm1 = NULL; i < npages; i += 2) {
		/* get a new v0 */
		v0 = page_address(pages[i]);
		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
		v1 = i + 1 < npages ?
				page_address(pages[i + 1]) : NULL;
		trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
		/* compare i, i + 1 vaddr */
		if (v1 != (v0 + PAGE_SIZE)) {
			/* flush out pages */
			sets = tid_flush_pages(list, &idx, pagecnt, sets);
			/* output v0,v1 as two pagesets */
			list[sets].idx = idx++;
			list[sets++].count = 1;
			if (v1) {
				list[sets].count = 1;
				list[sets++].idx = idx++;
			} else {
				list[sets++].count = 0;
			}
			vm1 = NULL;
			pagecnt = 0;
			continue;
		}
		/* i,i+1 consecutive, look at i-1,i */
		if (vm1 && v0 != (vm1 + PAGE_SIZE)) {
			/* flush out pages */
			sets = tid_flush_pages(list, &idx, pagecnt, sets);
			pagecnt = 0;
		}
		/* pages will always be a multiple of 8k */
		pagecnt += 2;
		/* save i-1 */
		vm1 = v1;
		/* move to next pair */
	}
	/* dump residual pages at end */
	sets = tid_flush_pages(list, &idx, npages - idx, sets);
	/* by design cannot be odd sets */
	WARN_ON_ONCE(sets & 1);
	return sets;
}
/*
 * Find pages for one segment of a sge array represented by @ss. The function
 * does not check the sge, the sge must have been checked for alignment with a
 * prior call to hfi1_kern_trdma_ok. Other sge checking is done as part of
 * rvt_lkey_ok and rvt_rkey_ok. Also, the function only modifies the local sge
 * copy maintained in @ss->sge, the original sge is not modified.
 *
 * Unlike IB RDMA WRITE, we can't decrement ss->num_sge here because we are not
 * releasing the MR reference count at the same time. Otherwise, we'll "leak"
 * references to the MR. This difference requires that we keep track of progress
 * into the sg_list. This is done by the cur_seg cursor in the tid_rdma_request
 * structure.
 */
static u32 kern_find_pages(struct tid_rdma_flow *flow,
			   struct page **pages,
			   struct rvt_sge_state *ss, bool *last)
{
	struct tid_rdma_request *req = flow->req;
	struct rvt_sge *sge = &ss->sge;
	u32 length = flow->req->seg_len;
	u32 len = PAGE_SIZE;
	u32 i = 0;

	while (length && req->isge < ss->num_sge) {
		pages[i++] = virt_to_page(sge->vaddr);

		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (!sge->sge_length) {
			if (++req->isge < ss->num_sge)
				*sge = ss->sg_list[req->isge - 1];
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				++sge->m;
				sge->n = 0;
			}
			sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}

	flow->length = flow->req->seg_len - length;
	*last = req->isge != ss->num_sge;
	return i;
}
static void dma_unmap_flow(struct tid_rdma_flow *flow)
{
	struct hfi1_devdata *dd;
	int i;
	struct tid_rdma_pageset *pset;

	dd = flow->req->rcd->dd;
	for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
			i++, pset++) {
		if (pset->count && pset->addr) {
			dma_unmap_page(&dd->pcidev->dev,
				       pset->addr,
				       PAGE_SIZE * pset->count,
				       DMA_FROM_DEVICE);
			pset->mapped = 0;
		}
	}
}

static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages)
{
	int i;
	struct hfi1_devdata *dd = flow->req->rcd->dd;
	struct tid_rdma_pageset *pset;

	for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
			i++, pset++) {
		if (pset->count) {
			pset->addr = dma_map_page(&dd->pcidev->dev,
						  pages[pset->idx],
						  0,
						  PAGE_SIZE * pset->count,
						  DMA_FROM_DEVICE);

			if (dma_mapping_error(&dd->pcidev->dev, pset->addr)) {
				dma_unmap_flow(flow);
				return -ENOMEM;
			}
			pset->mapped = 1;
		}
	}
	return 0;
}

static inline bool dma_mapped(struct tid_rdma_flow *flow)
{
	return !!flow->pagesets[0].mapped;
}
/*
 * Get pages pointers and identify contiguous physical memory chunks for a
 * segment. All segments are of length flow->req->seg_len.
 */
static int kern_get_phys_blocks(struct tid_rdma_flow *flow,
				struct page **pages,
				struct rvt_sge_state *ss, bool *last)
{
	u8 npages;

	/* Reuse previously computed pagesets, if any */
	if (flow->npagesets) {
		trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
					  flow);
		if (!dma_mapped(flow))
			return dma_map_flow(flow, pages);
		return 0;
	}

	npages = kern_find_pages(flow, pages, ss, last);

	if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
		flow->npagesets =
			tid_rdma_find_phys_blocks_4k(flow, pages, npages,
						     flow->pagesets);
	else
		flow->npagesets =
			tid_rdma_find_phys_blocks_8k(flow, pages, npages,
						     flow->pagesets);

	return dma_map_flow(flow, pages);
}
static inline void kern_add_tid_node(struct tid_rdma_flow *flow,
				     struct hfi1_ctxtdata *rcd, char *s,
				     struct tid_group *grp, u8 cnt)
{
	struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++];

	WARN_ON_ONCE(flow->tnode_cnt >=
		     (TID_RDMA_MAX_SEGMENT_SIZE >> PAGE_SHIFT));
	if (WARN_ON_ONCE(cnt & 1))
		dd_dev_err(rcd->dd,
			   "unexpected odd allocation cnt %u map 0x%x used %u",
			   cnt, grp->map, grp->used);

	node->grp = grp;
	node->map = grp->map;
	node->cnt = cnt;
	trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
				grp->base, grp->map, grp->used, cnt);
}
/*
 * Try to allocate pageset_count TID's from TID groups for a context
 *
 * This function allocates TID's without moving groups between lists or
 * modifying grp->map. This is done as follows, being cognizant of the lists
 * between which the TID groups will move:
 * 1. First allocate complete groups of 8 TID's since this is more efficient,
 *    these groups will move from group->full without affecting used
 * 2. If more TID's are needed allocate from used (will move from used->full or
 *    stay in used)
 * 3. If we still don't have the required number of TID's go back and look again
 *    at a complete group (will move from group->used)
 */
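/*
 * Worked example (illustrative, not in the original source): with a
 * group size of 8 and npagesets == 21, step 1 above claims two complete
 * groups (16 TIDs), step 2 takes up to 5 more from partially used
 * groups, and step 3 dips into one more complete group only if the
 * remainder is still uncovered. Failure at any point leaves the group
 * lists untouched and returns -EAGAIN so the QP can be queued and the
 * allocation retried later.
 */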
static int kern_alloc_tids(struct tid_rdma_flow *flow)
{
	struct hfi1_ctxtdata *rcd = flow->req->rcd;
	struct hfi1_devdata *dd = rcd->dd;
	u32 ngroups, pageidx = 0;
	struct tid_group *group = NULL, *used;
	u8 use;

	flow->tnode_cnt = 0;
	ngroups = flow->npagesets / dd->rcv_entries.group_size;
	if (!ngroups)
		goto used_list;

	/* First look at complete groups */
	list_for_each_entry(group, &rcd->tid_group_list.list, list) {
		kern_add_tid_node(flow, rcd, "complete groups", group,
				  group->size);

		pageidx += group->size;
		if (!--ngroups)
			break;
	}

	if (pageidx >= flow->npagesets)
		goto ok;

used_list:
	/* Now look at partially used groups */
	list_for_each_entry(used, &rcd->tid_used_list.list, list) {
		use = min_t(u32, flow->npagesets - pageidx,
			    used->size - used->used);
		kern_add_tid_node(flow, rcd, "used groups", used, use);

		pageidx += use;
		if (pageidx >= flow->npagesets)
			goto ok;
	}

	/*
	 * Look again at a complete group, continuing from where we left.
	 * However, if we are at the head, we have reached the end of the
	 * complete groups list from the first loop above
	 */
	if (group && &group->list == &rcd->tid_group_list.list)
		goto bail_eagain;
	group = list_prepare_entry(group, &rcd->tid_group_list.list,
				   list);
	if (list_is_last(&group->list, &rcd->tid_group_list.list))
		goto bail_eagain;
	group = list_next_entry(group, list);
	use = min_t(u32, flow->npagesets - pageidx, group->size);
	kern_add_tid_node(flow, rcd, "complete continue", group, use);
	pageidx += use;
	if (pageidx >= flow->npagesets)
		goto ok;
bail_eagain:
	trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
				  (u64)flow->npagesets);
	return -EAGAIN;
ok:
	return 0;
}
static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num,
				   u32 *pset_idx)
{
	struct hfi1_ctxtdata *rcd = flow->req->rcd;
	struct hfi1_devdata *dd = rcd->dd;
	struct kern_tid_node *node = &flow->tnode[grp_num];
	struct tid_group *grp = node->grp;
	struct tid_rdma_pageset *pset;
	u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
	u32 rcventry, npages = 0, pair = 0, tidctrl;
	u8 i, cnt = 0;

	for (i = 0; i < grp->size; i++) {
		rcventry = grp->base + i;

		if (node->map & BIT(i) || cnt >= node->cnt) {
			rcv_array_wc_fill(dd, rcventry);
			continue;
		}
		pset = &flow->pagesets[(*pset_idx)++];
		if (pset->count) {
			hfi1_put_tid(dd, rcventry, PT_EXPECTED,
				     pset->addr, trdma_pset_order(pset));
		} else {
			hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);
		}
		npages += pset->count;

		rcventry -= rcd->expected_base;
		tidctrl = pair ? 0x3 : rcventry & 0x1 ? 0x2 : 0x1;
		/*
		 * A single TID entry will be used to use a rcvarr pair (with
		 * tidctrl 0x3), if ALL these are true (a) the bit pos is even
		 * (b) the group map shows current and the next bits as free
		 * indicating two consecutive rcvarry entries are available (c)
		 * we actually need 2 more entries
		 */
		pair = !(i & 0x1) && !((node->map >> i) & 0x3) &&
			node->cnt >= cnt + 2;
		if (!pair) {
			if (!pset->count)
				tidctrl = 0x1;
			flow->tid_entry[flow->tidcnt++] =
				EXP_TID_SET(IDX, rcventry >> 1) |
				EXP_TID_SET(CTRL, tidctrl) |
				EXP_TID_SET(LEN, npages);
			trace_hfi1_tid_entry_alloc(/* entry */
			   flow->req->qp, flow->tidcnt - 1,
			   flow->tid_entry[flow->tidcnt - 1]);

			/* Efficient DIV_ROUND_UP(npages, pmtu_pg) */
			flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg);
			npages = 0;
		}

		if (grp->used == grp->size - 1)
			tid_group_move(grp, &rcd->tid_used_list,
				       &rcd->tid_full_list);
		else if (!grp->used)
			tid_group_move(grp, &rcd->tid_group_list,
				       &rcd->tid_used_list);

		grp->used++;
		grp->map |= BIT(i);
		cnt++;
	}
}
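/*
 * Worked example for the pairing logic above (illustrative, not in the
 * original source): take rcventry offsets 4 and 5, an even/odd pair,
 * both clear in the group map, with at least two entries still needed.
 * While programming entry 4, pair evaluates true, so no TID entry is
 * emitted yet and npages keeps accumulating; while programming entry 5,
 * tidctrl becomes 0x3 and a single TID entry covers both rcvarray slots
 * with the combined page count. An unpaired even slot is emitted with
 * tidctrl 0x1 and an unpaired odd slot with 0x2.
 */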
static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num)
{
	struct hfi1_ctxtdata *rcd = flow->req->rcd;
	struct hfi1_devdata *dd = rcd->dd;
	struct kern_tid_node *node = &flow->tnode[grp_num];
	struct tid_group *grp = node->grp;
	u32 rcventry;
	u8 i, cnt = 0;

	for (i = 0; i < grp->size; i++) {
		rcventry = grp->base + i;

		if (node->map & BIT(i) || cnt >= node->cnt) {
			rcv_array_wc_fill(dd, rcventry);
			continue;
		}
		hfi1_put_tid(dd, rcventry, PT_INVALID, 0, 0);

		grp->used--;
		grp->map &= ~BIT(i);
		cnt++;

		if (grp->used == grp->size - 1)
			tid_group_move(grp, &rcd->tid_full_list,
				       &rcd->tid_used_list);
		else if (!grp->used)
			tid_group_move(grp, &rcd->tid_used_list,
				       &rcd->tid_group_list);
	}
	if (WARN_ON_ONCE(cnt & 1)) {
		struct hfi1_ctxtdata *rcd = flow->req->rcd;
		struct hfi1_devdata *dd = rcd->dd;

		dd_dev_err(dd, "unexpected odd free cnt %u map 0x%x used %u",
			   cnt, grp->map, grp->used);
	}
}

static void kern_program_rcvarray(struct tid_rdma_flow *flow)
{
	u32 pset_idx = 0;
	int i;

	flow->npkts = 0;
	flow->tidcnt = 0;
	for (i = 0; i < flow->tnode_cnt; i++)
		kern_program_rcv_group(flow, i, &pset_idx);
	trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
}
/**
 * hfi1_kern_exp_rcv_setup() - setup TID's and flow for one segment of a
 * TID RDMA request
 *
 * @req: TID RDMA request for which the segment/flow is being set up
 * @ss: sge state, maintains state across successive segments of a sge
 * @last: set to true after the last sge segment has been processed
 *
 * This function
 * (1) finds a free flow entry in the flow circular buffer
 * (2) finds pages and continuous physical chunks constituting one segment
 *     of an sge
 * (3) allocates TID group entries for those chunks
 * (4) programs rcvarray entries in the hardware corresponding to those
 *     TID's
 * (5) computes a tidarray with formatted TID entries which can be sent
 *     to the sender
 * (6) Reserves and programs HW flows.
 * (7) It also manages queuing the QP when TID/flow resources are not
 *     available.
 *
 * @req points to struct tid_rdma_request of which the segments are a part. The
 * function uses qp, rcd and seg_len members of @req. In the absence of errors,
 * req->flow_idx is the index of the flow which has been prepared in this
 * invocation of function call. With flow = &req->flows[req->flow_idx],
 * flow->tid_entry contains the TID array which the sender can use for TID RDMA
 * sends and flow->npkts contains number of packets required to send the
 * segment.
 *
 * hfi1_check_sge_align should be called prior to calling this function and if
 * it signals error TID RDMA cannot be used for this sge and this function
 * should not be called.
 *
 * For the queuing, caller must hold the flow->req->qp s_lock from the send
 * engine and the function will procure the exp_lock.
 *
 * Return:
 * The function returns -EAGAIN if sufficient number of TID/flow resources to
 * map the segment could not be allocated. In this case the function should be
 * called again with previous arguments to retry the TID allocation. There are
 * no other error returns. The function returns 0 on success.
 */
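/*
 * Illustrative caller sketch (example only, not driver code): the send
 * engine retries with the same arguments until resources free up.
 */
#if 0	/* example only, never compiled */
	if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
		/* -EAGAIN: the QP is now queued on the rcd wait queue */
		req->state = TID_REQUEST_QUEUED;
		return;
	}
	/* success: req->flows[req->flow_idx] describes the segment */
#endif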
int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
			    struct rvt_sge_state *ss, bool *last)
	__must_hold(&req->qp->s_lock)
{
	struct tid_rdma_flow *flow = &req->flows[req->setup_head];
	struct hfi1_ctxtdata *rcd = req->rcd;
	struct hfi1_qp_priv *qpriv = req->qp->priv;
	unsigned long flags;
	struct rvt_qp *fqp;
	u16 clear_tail = req->clear_tail;

	lockdep_assert_held(&req->qp->s_lock);
	/*
	 * We return error if either (a) we don't have space in the flow
	 * circular buffer, or (b) we already have max entries in the buffer.
	 * Max entries depend on the type of request we are processing and the
	 * negotiated TID RDMA parameters.
	 */
	if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) ||
	    CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >=
	    req->n_flows)
		return -EINVAL;

	/*
	 * Get pages, identify contiguous physical memory chunks for the
	 * segment. If we can not determine a DMA address mapping we will
	 * treat it just like if we ran out of space above.
	 */
	if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) {
		hfi1_wait_kmem(flow->req->qp);
		return -ENOMEM;
	}

	spin_lock_irqsave(&rcd->exp_lock, flags);
	if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
		goto queue;

	/*
	 * At this point we know the number of pagesets and hence the number of
	 * TID's to map the segment. Allocate the TID's from the TID groups. If
	 * we cannot allocate the required number we exit and try again later
	 */
	if (kern_alloc_tids(flow))
		goto queue;
	/*
	 * Finally program the TID entries with the pagesets, compute the
	 * tidarray and enable the HW flow
	 */
	kern_program_rcvarray(flow);

	/*
	 * Setup the flow state with relevant information.
	 * This information is used for tracking the sequence of data packets
	 * for the segment.
	 * The flow is setup here as this is the most accurate time and place
	 * to do so. Doing at a later time runs the risk of the flow data in
	 * qpriv getting out of sync.
	 */
	memset(&flow->flow_state, 0x0, sizeof(flow->flow_state));
	flow->idx = qpriv->flow_state.index;
	flow->flow_state.generation = qpriv->flow_state.generation;
	flow->flow_state.spsn = qpriv->flow_state.psn;
	flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1;
	flow->flow_state.r_next_psn =
		full_flow_psn(flow, flow->flow_state.spsn);
	qpriv->flow_state.psn += flow->npkts;

	dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
	/* get head before dropping lock */
	fqp = first_qp(rcd, &rcd->rarr_queue);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);
	tid_rdma_schedule_tid_wakeup(fqp);

	req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
	return 0;
queue:
	queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);
	return -EAGAIN;
}
static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow)
{
	flow->npagesets = 0;
}

/*
 * This function is called after one segment has been successfully sent to
 * release the flow and TID HW/SW resources for that segment. The segments for a
 * TID RDMA request are setup and cleared in FIFO order which is managed using a
 * circular buffer.
 */
int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req)
	__must_hold(&req->qp->s_lock)
{
	struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
	struct hfi1_ctxtdata *rcd = req->rcd;
	unsigned long flags;
	int i;
	struct rvt_qp *fqp;

	lockdep_assert_held(&req->qp->s_lock);
	/* Exit if we have nothing in the flow circular buffer */
	if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS))
		return -EINVAL;

	spin_lock_irqsave(&rcd->exp_lock, flags);

	for (i = 0; i < flow->tnode_cnt; i++)
		kern_unprogram_rcv_group(flow, i);
	/* To prevent double unprogramming */
	flow->tnode_cnt = 0;
	/* get head before dropping lock */
	fqp = first_qp(rcd, &rcd->rarr_queue);
	spin_unlock_irqrestore(&rcd->exp_lock, flags);

	dma_unmap_flow(flow);

	hfi1_tid_rdma_reset_flow(flow);
	req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1);

	if (fqp == req->qp) {
		__trigger_tid_waiter(fqp);
		rvt_put_qp(fqp);
	} else {
		tid_rdma_schedule_tid_wakeup(fqp);
	}

	return 0;
}
/*
 * This function is called to release all the tid entries for
 * a request.
 */
void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
	__must_hold(&req->qp->s_lock)
{
	/* Use memory barrier for proper ordering */
	while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) {
		if (hfi1_kern_exp_rcv_clear(req))
			break;
	}
}

/**
 * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
 * @req: the tid rdma request to be cleaned
 */
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
{
	kfree(req->flows);
	req->flows = NULL;
}

/**
 * __trdma_clean_swqe - clean up for large sized QPs
 * @qp: the queue pair
 * @wqe: the send wqe
 */
void __trdma_clean_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	struct hfi1_swqe_priv *p = wqe->priv;

	hfi1_kern_exp_rcv_free_flows(&p->tid_req);
}

/*
 * This can be called at QP create time or in the data path.
 */
static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
					 gfp_t gfp)
{
	struct tid_rdma_flow *flows;
	int i;

	if (likely(req->flows))
		return 0;
	flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp,
			     req->rcd->numa_id);
	if (!flows)
		return -ENOMEM;
	/* mini init */
	for (i = 0; i < MAX_FLOWS; i++) {
		flows[i].req = req;
		flows[i].npagesets = 0;
		flows[i].pagesets[0].mapped = 0;
	}
	req->flows = flows;
	return 0;
}
static void hfi1_init_trdma_req(struct rvt_qp *qp,
				struct tid_rdma_request *req)
{
	struct hfi1_qp_priv *qpriv = qp->priv;

	/*
	 * Initialize various TID RDMA request variables.
	 * These variables are "static", which is why they
	 * can be pre-initialized here before the WRs have
	 * even been submitted.
	 * However, non-NULL values for these variables do not
	 * imply that this WQE has been enabled for TID RDMA.
	 * Drivers should check the WQE's opcode to determine
	 * if a request is a TID RDMA one or not.
	 */
	req->qp = qp;
	req->rcd = qpriv->rcd;
}

u64 hfi1_access_sw_tid_wait(const struct cntr_entry *entry,
			    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_tidwait;
}

static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
					  u32 psn, u16 *fidx)
{
	u16 head, tail;
	struct tid_rdma_flow *flow;

	head = req->setup_head;
	tail = req->clear_tail;
	for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
	     tail = CIRC_NEXT(tail, MAX_FLOWS)) {
		flow = &req->flows[tail];
		if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 &&
		    cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) {
			if (fidx)
				*fidx = tail;
			return flow;
		}
	}
	return NULL;
}

static struct tid_rdma_flow *
__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
		   u32 psn, u16 *fidx)
{
	for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
	     tail = CIRC_NEXT(tail, MAX_FLOWS)) {
		struct tid_rdma_flow *flow = &req->flows[tail];
		u32 spsn, lpsn;

		spsn = full_flow_psn(flow, flow->flow_state.spsn);
		lpsn = full_flow_psn(flow, flow->flow_state.lpsn);

		if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
			if (fidx)
				*fidx = tail;
			return flow;
		}
	}
	return NULL;
}

static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
				       u32 psn, u16 *fidx)
{
	return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
				  fidx);
}
/* TID RDMA READ functions */
u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
				    struct ib_other_headers *ohdr, u32 *bth1,
				    u32 *bth2, u32 *len)
{
	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
	struct tid_rdma_flow *flow = &req->flows[req->flow_idx];
	struct rvt_qp *qp = req->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct hfi1_swqe_priv *wpriv = wqe->priv;
	struct tid_rdma_read_req *rreq = &ohdr->u.tid_rdma.r_req;
	struct tid_rdma_params *remote;
	u32 req_len = 0;
	void *req_addr = NULL;

	/* This is the IB psn used to send the request */
	*bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt);
	trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);

	/* TID Entries for TID RDMA READ payload */
	req_addr = &flow->tid_entry[flow->tid_idx];
	req_len = sizeof(*flow->tid_entry) *
			(flow->tidcnt - flow->tid_idx);

	memset(&ohdr->u.tid_rdma.r_req, 0, sizeof(ohdr->u.tid_rdma.r_req));
	wpriv->ss.sge.vaddr = req_addr;
	wpriv->ss.sge.sge_length = req_len;
	wpriv->ss.sge.length = wpriv->ss.sge.sge_length;
	/*
	 * We can safely zero these out. Since the first SGE covers the
	 * entire packet, nothing else should even look at the MR.
	 */
	wpriv->ss.sge.mr = NULL;
	wpriv->ss.sge.m = 0;
	wpriv->ss.sge.n = 0;

	wpriv->ss.sg_list = NULL;
	wpriv->ss.total_len = wpriv->ss.sge.sge_length;
	wpriv->ss.num_sge = 1;

	/* Construct the TID RDMA READ REQ packet header */
	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);

	KDETH_RESET(rreq->kdeth0, KVER, 0x1);
	KDETH_RESET(rreq->kdeth1, JKEY, remote->jkey);
	rreq->reth.vaddr = cpu_to_be64(wqe->rdma_wr.remote_addr +
			   req->cur_seg * req->seg_len + flow->sent);
	rreq->reth.rkey = cpu_to_be32(wqe->rdma_wr.rkey);
	rreq->reth.length = cpu_to_be32(*len);
	rreq->tid_flow_psn =
		cpu_to_be32((flow->flow_state.generation <<
			     HFI1_KDETH_BTH_SEQ_SHIFT) |
			    ((flow->flow_state.spsn + flow->pkt) &
			     HFI1_KDETH_BTH_SEQ_MASK));
	rreq->tid_flow_qp =
		cpu_to_be32(qpriv->tid_rdma.local.qp |
			    ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
			     TID_RDMA_DESTQP_FLOW_SHIFT) |
			    qpriv->rcd->ctxt);
	rreq->verbs_qp = cpu_to_be32(qp->remote_qpn);
	*bth1 &= ~RVT_QPN_MASK;
	*bth1 |= remote->qp;
	*bth2 |= IB_BTH_REQ_ACK;
	rcu_read_unlock();

	/* We are done with this segment */
	flow->sent += *len;
	req->cur_seg++;
	qp->s_state = TID_OP(READ_REQ);
	req->ack_pending++;
	req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1);
	qpriv->pending_tid_r_segs++;
	qp->s_num_rd_atomic++;

	/* Set the TID RDMA READ request payload size */
	*len = req_len;

	return sizeof(ohdr->u.tid_rdma.r_req) / sizeof(u32);
}
/*
 * @len: contains the data length to read upon entry and the read request
 *       payload length upon exit.
 */
u32 hfi1_build_tid_rdma_read_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
				 struct ib_other_headers *ohdr, u32 *bth1,
				 u32 *bth2, u32 *len)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
	struct tid_rdma_flow *flow = NULL;
	u32 hdwords = 0;
	bool last;
	bool retry = true;
	u32 npkts = rvt_div_round_up_mtu(qp, *len);

	trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn,
					  wqe->lpsn, req);
	/*
	 * Check sync conditions. Make sure that there are no pending
	 * segments before freeing the flow.
	 */
sync_check:
	if (req->state == TID_REQUEST_SYNC) {
		if (qpriv->pending_tid_r_segs)
			goto done;

		hfi1_kern_clear_hw_flow(req->rcd, qp);
		qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
		req->state = TID_REQUEST_ACTIVE;
	}

	/*
	 * If the request for this segment is resent, the tid resources should
	 * have been allocated before. In this case, req->flow_idx should
	 * fall behind req->setup_head.
	 */
	if (req->flow_idx == req->setup_head) {
		retry = false;
		if (req->state == TID_REQUEST_RESEND) {
			/*
			 * This is the first new segment for a request whose
			 * earlier segments have been re-sent. We need to
			 * set up the sge pointer correctly.
			 */
			restart_sge(&qp->s_sge, wqe, req->s_next_psn,
				    qp->pmtu);
			req->isge = 0;
			req->state = TID_REQUEST_ACTIVE;
		}

		/*
		 * Check sync. The last PSN of each generation is reserved for
		 * RESYNC.
		 */
		if ((qpriv->flow_state.psn + npkts) > MAX_TID_FLOW_PSN - 1) {
			req->state = TID_REQUEST_SYNC;
			goto sync_check;
		}

		/* Allocate the flow if not yet */
		if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp))
			goto done;

		/*
		 * The following call will advance req->setup_head after
		 * allocating the tid entries.
		 */
		if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
			req->state = TID_REQUEST_QUEUED;

			/*
			 * We don't have resources for this segment. The QP has
			 * already been queued.
			 */
			goto done;
		}
	}

	/* req->flow_idx should only be one slot behind req->setup_head */
	flow = &req->flows[req->flow_idx];
	flow->pkt = 0;
	flow->tid_idx = 0;
	flow->sent = 0;
	if (!retry) {
		/* Set the first and last IB PSN for the flow in use.*/
		flow->flow_state.ib_spsn = req->s_next_psn;
		flow->flow_state.ib_lpsn =
			flow->flow_state.ib_spsn + flow->npkts - 1;
	}

	/* Calculate the next segment start psn.*/
	req->s_next_psn += flow->npkts;

	/* Build the packet header */
	hdwords = hfi1_build_tid_rdma_read_packet(wqe, ohdr, bth1, bth2, len);
done:
	return hdwords;
}
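/*
 * Worked example for the sync check above (illustrative, not in the
 * original source): with an 11-bit KDETH sequence space,
 * MAX_TID_FLOW_PSN is 2048 and the last PSN of each generation is
 * reserved for RESYNC. If qpriv->flow_state.psn is 2040 and the next
 * segment needs npkts == 16 packets, 2040 + 16 > 2047, so the request
 * enters TID_REQUEST_SYNC: it waits for pending read segments to
 * drain, clears the HW flow, and continues on a fresh generation.
 */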
/*
 * Validate and accept the TID RDMA READ request parameters.
 * Return 0 if the request is accepted successfully;
 * Return 1 otherwise.
 */
static int tid_rdma_rcv_read_request(struct rvt_qp *qp,
				     struct rvt_ack_entry *e,
				     struct hfi1_packet *packet,
				     struct ib_other_headers *ohdr,
				     u32 bth0, u32 psn, u64 vaddr, u32 len)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	u32 flow_psn, i, tidlen = 0, pktlen, tlen;

	req = ack_to_tid_req(e);

	/* Validate the payload first */
	flow = &req->flows[req->setup_head];

	/* payload length = packet length - (header length + ICRC length) */
	pktlen = packet->tlen - (packet->hlen + 4);
	if (pktlen > sizeof(flow->tid_entry))
		return 1;
	memcpy(flow->tid_entry, packet->ebuf, pktlen);
	flow->tidcnt = pktlen / sizeof(*flow->tid_entry);

	/*
	 * Walk the TID_ENTRY list to make sure we have enough space for a
	 * complete segment. Also calculate the number of required packets.
	 */
	flow->npkts = rvt_div_round_up_mtu(qp, len);
	for (i = 0; i < flow->tidcnt; i++) {
		trace_hfi1_tid_entry_rcv_read_req(qp, i,
						  flow->tid_entry[i]);
		tlen = EXP_TID_GET(flow->tid_entry[i], LEN);
		if (!tlen)
			return 1;

		/*
		 * For a tid pair (tidctrl == 3), the buffer size of the pair
		 * should be the sum of the buffer size described by each
		 * tid entry. However, only the first entry needs to be
		 * specified in the request (see WFR HAS Section 8.5.7.1).
		 */
		tidlen += tlen;
	}
	if (tidlen * PAGE_SIZE < len)
		return 1;

	/* Empty the flow array */
	req->clear_tail = req->setup_head;
	flow->pkt = 0;
	flow->tid_idx = 0;
	flow->tid_offset = 0;
	flow->sent = 0;
	flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp);
	flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
		    TID_RDMA_DESTQP_FLOW_MASK;
	flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_psn));
	flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
	flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
	flow->length = len;

	flow->flow_state.lpsn = flow->flow_state.spsn +
		flow->npkts - 1;
	flow->flow_state.ib_spsn = psn;
	flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1;

	trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
	/* Set the initial flow index to the current flow. */
	req->flow_idx = req->setup_head;

	/* advance circular buffer head */
	req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);

	/* Compute last PSN for request. */
	e->opcode = (bth0 >> 24) & 0xff;
	e->psn = psn;
	e->lpsn = psn + flow->npkts - 1;
	e->sent = 0;

	req->n_flows = qpriv->tid_rdma.local.max_read;
	req->state = TID_REQUEST_ACTIVE;
	req->cur_seg = 0;
	req->comp_seg = 0;
	req->ack_seg = 0;
	req->isge = 0;
	req->seg_len = qpriv->tid_rdma.local.max_len;
	req->total_len = len;
	req->total_segs = 1;
	req->r_flow_psn = e->psn;

	trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn,
					req);
	return 0;
}
static int tid_rdma_rcv_error(struct hfi1_packet *packet,
			      struct ib_other_headers *ohdr,
			      struct rvt_qp *qp, u32 psn, int diff)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct rvt_ack_entry *e;
	struct tid_rdma_request *req;
	unsigned long flags;
	u8 prev;
	bool old_req;

	trace_hfi1_rsp_tid_rcv_error(qp, psn);
	trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff);
	if (diff > 0) {
		/* sequence error */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			qp->r_ack_psn = qp->r_psn;
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);
	e = find_prev_entry(qp, psn, &prev, NULL, &old_req);
	if (!e || (e->opcode != TID_OP(READ_REQ) &&
		   e->opcode != TID_OP(WRITE_REQ)))
		goto unlock;

	req = ack_to_tid_req(e);
	req->r_flow_psn = psn;
	trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
	if (e->opcode == TID_OP(READ_REQ)) {
		struct ib_reth *reth;
		u32 len;
		u32 rkey;
		u64 vaddr;
		int ok;
		u32 bth0;

		reth = &ohdr->u.tid_rdma.r_req.reth;
		/*
		 * The requester always restarts from the start of the original
		 * request.
		 */
		len = be32_to_cpu(reth->length);
		if (psn != e->psn || len != req->total_len)
			goto unlock;

		release_rdma_sge_mr(e);

		rkey = be32_to_cpu(reth->rkey);
		vaddr = get_ib_reth_vaddr(reth);

		qp->r_len = len;
		ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
				 IB_ACCESS_REMOTE_READ);
		if (unlikely(!ok))
			goto unlock;
		/*
		 * If all the response packets for the current request have
		 * been sent out and this request is complete (old_request
		 * == false), then the TID flow may be unusable (the
		 * req->clear_tail is advanced). However, when an earlier
		 * request is received, this request will not be complete any
		 * more (qp->s_tail_ack_queue is moved back, see below).
		 * Consequently, we need to update the TID flow info every time
		 * a duplicate request is received.
		 */
		bth0 = be32_to_cpu(ohdr->bth[0]);
		if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn,
					      vaddr, len))
			goto unlock;

		/*
		 * True if the request is already scheduled (between
		 * qp->s_tail_ack_queue and qp->r_head_ack_queue);
		 */
		if (old_req)
			goto unlock;
	} else {
2075 struct flow_state *fstate;
2076 bool schedule = false;
2079 if (req->state == TID_REQUEST_RESEND) {
2080 req->state = TID_REQUEST_RESEND_ACTIVE;
2081 } else if (req->state == TID_REQUEST_INIT_RESEND) {
2082 req->state = TID_REQUEST_INIT;
2087 * True if the request is already scheduled (between
2088 * qp->s_tail_ack_queue and qp->r_head_ack_queue).
2089 * Also, don't change requests, which are at the SYNC
2090 * point and haven't generated any responses yet.
2091 * There is nothing to retransmit for them yet.
2093 if (old_req || req->state == TID_REQUEST_INIT ||
2094 (req->state == TID_REQUEST_SYNC && !req->cur_seg)) {
2095 for (i = prev + 1; ; i++) {
2096 if (i > rvt_size_atomic(&dev->rdi))
2098 if (i == qp->r_head_ack_queue)
2100 e = &qp->s_ack_queue[i];
2101 req = ack_to_tid_req(e);
2102 if (e->opcode == TID_OP(WRITE_REQ) &&
2103 req->state == TID_REQUEST_INIT)
2104 req->state = TID_REQUEST_INIT_RESEND;
2107 * If the state of the request has been changed,
2108 * the first leg needs to get scheduled in order to
2109 * pick up the change. Otherwise, normal response
2110 * processing should take care of it.
2117 * If there is no more allocated segment, just schedule the qp
2118 * without changing any state.
2120 if (req->clear_tail == req->setup_head)
2123 * If this request has sent responses for segments, which have
2124 * not received data yet (flow_idx != clear_tail), the flow_idx
2125 * pointer needs to be adjusted so the same responses can be
2128 if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) {
2129 fstate = &req->flows[req->clear_tail].flow_state;
2130 qpriv->pending_tid_w_segs -=
2131 CIRC_CNT(req->flow_idx, req->clear_tail,
2134 CIRC_ADD(req->clear_tail,
2135 delta_psn(psn, fstate->resp_ib_psn),
2137 qpriv->pending_tid_w_segs +=
2138 delta_psn(psn, fstate->resp_ib_psn);
2140 * When flow_idx == setup_head, we've gotten a duplicate
2141 * request for a segment, which has not been allocated
2142 * yet. In that case, don't adjust this request.
2143 * However, we still want to go through the loop below
2144 * to adjust all subsequent requests.
2146 if (CIRC_CNT(req->setup_head, req->flow_idx,
2148 req->cur_seg = delta_psn(psn, e->psn);
2149 req->state = TID_REQUEST_RESEND_ACTIVE;
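/*
 * A note on the responder's flow ring, as used above (inferred from
 * its use throughout this file): req->setup_head is where the next
 * segment's flow is allocated, req->flow_idx is the next flow for
 * which a response will be sent, req->clear_tail is the oldest flow
 * still expecting data, and req->acked_tail is the oldest flow not
 * yet acked. All of them advance through the MAX_FLOWS-sized ring
 * via the CIRC_* macros.
 */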
2153 for (i = prev + 1; ; i++) {
2155 * Look at everything up to and including
2158 if (i > rvt_size_atomic(&dev->rdi))
2160 if (i == qp->r_head_ack_queue)
2162 e = &qp->s_ack_queue[i];
2163 req = ack_to_tid_req(e);
2164 trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn,
2166 if (e->opcode != TID_OP(WRITE_REQ) ||
2167 req->cur_seg == req->comp_seg ||
2168 req->state == TID_REQUEST_INIT ||
2169 req->state == TID_REQUEST_INIT_RESEND) {
2170 if (req->state == TID_REQUEST_INIT)
2171 req->state = TID_REQUEST_INIT_RESEND;
2174 qpriv->pending_tid_w_segs -=
2175 CIRC_CNT(req->flow_idx,
2178 req->flow_idx = req->clear_tail;
2179 req->state = TID_REQUEST_RESEND;
2180 req->cur_seg = req->comp_seg;
2182 qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
2184 /* Re-process old requests.*/
2185 if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
2186 qp->s_acked_ack_queue = prev;
2187 qp->s_tail_ack_queue = prev;
2189 * Since the qp->s_tail_ack_queue is modified, the
2190 * qp->s_ack_state must be changed to re-initialize
2191 * qp->s_ack_rdma_sge; otherwise, we will end up in the
2192 * wrong memory region.
2194 qp->s_ack_state = OP(ACKNOWLEDGE);
2197 * It's possible to receive a retry psn that is earlier than an RNR NAK
2198 * psn. In this case, the RNR NAK state should be cleared.
2200 if (qpriv->rnr_nak_state) {
2201 qp->s_nak_state = 0;
2202 qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
2203 qp->r_psn = e->lpsn + 1;
2204 hfi1_tid_write_alloc_resources(qp, true);
2207 qp->r_state = e->opcode;
2208 qp->r_nak_state = 0;
2209 qp->s_flags |= RVT_S_RESP_PENDING;
2210 hfi1_schedule_send(qp);
2212 spin_unlock_irqrestore(&qp->s_lock, flags);
2217 void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
2219 /* HANDLER FOR TID RDMA READ REQUEST packet (Responder side) */
2222 * 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ
2223 * (see hfi1_rc_rcv())
2224 * 2. Put TID RDMA READ REQ into the response queue (s_ack_queue)
2225 * - Setup struct tid_rdma_req with request info
2226 * - Initialize struct tid_rdma_flow info;
2227 * - Copy TID entries;
2228 * 3. Set the qp->s_ack_state.
2229 * 4. Set RVT_S_RESP_PENDING in s_flags.
2230 * 5. Kick the send engine (hfi1_schedule_send())
2232 struct hfi1_ctxtdata *rcd = packet->rcd;
2233 struct rvt_qp *qp = packet->qp;
2234 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
2235 struct ib_other_headers *ohdr = packet->ohdr;
2236 struct rvt_ack_entry *e;
2237 unsigned long flags;
2238 struct ib_reth *reth;
2239 struct hfi1_qp_priv *qpriv = qp->priv;
2240 u32 bth0, psn, len, rkey;
2245 u8 nack_state = IB_NAK_INVALID_REQUEST;
2247 bth0 = be32_to_cpu(ohdr->bth[0]);
2248 if (hfi1_ruc_check_hdr(ibp, packet))
2251 fecn = process_ecn(qp, packet);
2252 psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
2253 trace_hfi1_rsp_rcv_tid_read_req(qp, psn);
2255 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
2258 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2261 reth = &ohdr->u.tid_rdma.r_req.reth;
2262 vaddr = be64_to_cpu(reth->vaddr);
2263 len = be32_to_cpu(reth->length);
2264 /* The length needs to be in multiples of PAGE_SIZE */
2265 if (!len || len & ~PAGE_MASK || len > qpriv->tid_rdma.local.max_len)
2268 diff = delta_psn(psn, qp->r_psn);
2269 if (unlikely(diff)) {
2270 tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
2274 /* We've verified the request, insert it into the ack queue. */
2275 next = qp->r_head_ack_queue + 1;
2276 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
2278 spin_lock_irqsave(&qp->s_lock, flags);
2279 if (unlikely(next == qp->s_tail_ack_queue)) {
2280 if (!qp->s_ack_queue[next].sent) {
2281 nack_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2282 goto nack_inv_unlock;
2284 update_ack_queue(qp, next);
2286 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2287 release_rdma_sge_mr(e);
2289 rkey = be32_to_cpu(reth->rkey);
2292 if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
2293 rkey, IB_ACCESS_REMOTE_READ)))
2296 /* Accept the request parameters */
2297 if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr,
2299 goto nack_inv_unlock;
2301 qp->r_state = e->opcode;
2302 qp->r_nak_state = 0;
2304 * We need to increment the MSN here instead of when we
2305 * finish sending the result since a duplicate request would
2306 * increment it more than once.
2309 qp->r_psn += e->lpsn - e->psn + 1;
2311 qp->r_head_ack_queue = next;
2314 * For all requests other than TID WRITE which are added to the ack
2315 * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to
2316 * do this because of interlocks between these and TID WRITE
2317 * requests. The same change has also been made in hfi1_rc_rcv().
2319 qpriv->r_tid_alloc = qp->r_head_ack_queue;
2321 /* Schedule the send tasklet. */
2322 qp->s_flags |= RVT_S_RESP_PENDING;
2324 qp->s_flags |= RVT_S_ECN;
2325 hfi1_schedule_send(qp);
2327 spin_unlock_irqrestore(&qp->s_lock, flags);
2331 spin_unlock_irqrestore(&qp->s_lock, flags);
2333 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2334 qp->r_nak_state = nack_state;
2335 qp->r_ack_psn = qp->r_psn;
2336 /* Queue NAK for later */
2337 rc_defered_ack(rcd, qp);
2340 spin_unlock_irqrestore(&qp->s_lock, flags);
2341 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
2342 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2343 qp->r_ack_psn = qp->r_psn;
2346 u32 hfi1_build_tid_rdma_read_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
2347 struct ib_other_headers *ohdr, u32 *bth0,
2348 u32 *bth1, u32 *bth2, u32 *len, bool *last)
2350 struct hfi1_ack_priv *epriv = e->priv;
2351 struct tid_rdma_request *req = &epriv->tid_req;
2352 struct hfi1_qp_priv *qpriv = qp->priv;
2353 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
2354 u32 tidentry = flow->tid_entry[flow->tid_idx];
2355 u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
2356 struct tid_rdma_read_resp *resp = &ohdr->u.tid_rdma.r_rsp;
2357 u32 next_offset, om = KDETH_OM_LARGE;
2360 struct tid_rdma_params *remote;
2362 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
2364 next_offset = flow->tid_offset + *len;
2365 last_pkt = (flow->sent >= flow->length);
2367 trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry);
2368 trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
2371 remote = rcu_dereference(qpriv->tid_rdma.remote);
2376 KDETH_RESET(resp->kdeth0, KVER, 0x1);
2377 KDETH_SET(resp->kdeth0, SH, !last_pkt);
2378 KDETH_SET(resp->kdeth0, INTR, !!(!last_pkt && remote->urg));
2379 KDETH_SET(resp->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
2380 KDETH_SET(resp->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
2381 KDETH_SET(resp->kdeth0, OM, om == KDETH_OM_LARGE);
2382 KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om);
2383 KDETH_RESET(resp->kdeth1, JKEY, remote->jkey);
2384 resp->verbs_qp = cpu_to_be32(qp->remote_qpn);
2387 resp->aeth = rvt_compute_aeth(qp);
2388 resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn +
2391 *bth0 = TID_OP(READ_RESP) << 24;
2392 *bth1 = flow->tid_qpn;
2393 *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
2394 HFI1_KDETH_BTH_SEQ_MASK) |
2395 (flow->flow_state.generation <<
2396 HFI1_KDETH_BTH_SEQ_SHIFT));
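/*
 * The KDETH PSN built above is the flow generation in the high bits
 * and the per-flow packet sequence ((spsn + pkt) masked to
 * HFI1_KDETH_BTH_SEQ_MASK) in the low HFI1_KDETH_BTH_SEQ_SHIFT bits;
 * full_flow_psn() composes the same layout elsewhere in this file.
 */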
2399 /* Advance to next flow */
2400 req->clear_tail = (req->clear_tail + 1) &
2403 if (next_offset >= tidlen) {
2404 flow->tid_offset = 0;
2407 flow->tid_offset = next_offset;
2410 hdwords = sizeof(ohdr->u.tid_rdma.r_rsp) / sizeof(u32);
2416 static inline struct tid_rdma_request *
2417 find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
2418 __must_hold(&qp->s_lock)
2420 struct rvt_swqe *wqe;
2421 struct tid_rdma_request *req = NULL;
2424 end = qp->s_cur + 1;
2425 if (end == qp->s_size)
2427 for (i = qp->s_acked; i != end;) {
2428 wqe = rvt_get_swqe_ptr(qp, i);
2429 if (cmp_psn(psn, wqe->psn) >= 0 &&
2430 cmp_psn(psn, wqe->lpsn) <= 0) {
2431 if (wqe->wr.opcode == opcode)
2432 req = wqe_to_tid_req(wqe);
2435 if (++i == qp->s_size)
2442 void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
2444 /* HANDLER FOR TID RDMA READ RESPONSE packet (Requester side) */
2447 * 1. Find matching SWQE
2448 * 2. Check that the entire segment has been read.
2449 * 3. Remove HFI1_S_WAIT_TID_RESP from s_flags.
2450 * 4. Free the TID flow resources.
2451 * 5. Kick the send engine (hfi1_schedule_send())
2453 struct ib_other_headers *ohdr = packet->ohdr;
2454 struct rvt_qp *qp = packet->qp;
2455 struct hfi1_qp_priv *priv = qp->priv;
2456 struct hfi1_ctxtdata *rcd = packet->rcd;
2457 struct tid_rdma_request *req;
2458 struct tid_rdma_flow *flow;
2461 unsigned long flags;
2464 trace_hfi1_sender_rcv_tid_read_resp(qp);
2465 fecn = process_ecn(qp, packet);
2466 kpsn = mask_psn(be32_to_cpu(ohdr->bth[2]));
2467 aeth = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.aeth);
2468 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
2470 spin_lock_irqsave(&qp->s_lock, flags);
2471 ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
2472 req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ);
2476 flow = &req->flows[req->clear_tail];
2477 /* When header suppression is disabled */
2478 if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) {
2479 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
2481 if (cmp_psn(kpsn, flow->flow_state.r_next_psn))
2483 flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
2485 * Copy the payload to destination buffer if this packet is
2486 * delivered as an eager packet due to RSM rule and FECN.
2487 * The RSM rule selects FECN bit in BTH and SH bit in
2488 * KDETH header and therefore will not match the last
2489 * packet of each segment that has SH bit cleared.
2491 if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
2492 struct rvt_sge_state ss;
2494 u32 tlen = packet->tlen;
2495 u16 hdrsize = packet->hlen;
2496 u8 pad = packet->pad;
2497 u8 extra_bytes = pad + packet->extra_byte +
2499 u32 pmtu = qp->pmtu;
2501 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
2503 len = restart_sge(&ss, req->e.swqe, ipsn, pmtu);
2504 if (unlikely(len < pmtu))
2506 rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
2508 /* Raise the sw sequence check flag for next packet */
2509 priv->s_flags |= HFI1_R_TID_SW_PSN;
2514 flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
2516 priv->pending_tid_r_segs--;
2517 qp->s_num_rd_atomic--;
2518 if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
2519 !qp->s_num_rd_atomic) {
2520 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
2522 hfi1_schedule_send(qp);
2524 if (qp->s_flags & RVT_S_WAIT_RDMAR) {
2525 qp->s_flags &= ~(RVT_S_WAIT_RDMAR | RVT_S_WAIT_ACK);
2526 hfi1_schedule_send(qp);
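/*
 * A TID RDMA READ occupies an rd_atomic slot just like an ordinary
 * RDMA READ, so releasing the slot above may unblock sends that were
 * fenced or waiting on an outstanding RDMA READ.
 */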
2529 trace_hfi1_ack(qp, ipsn);
2530 trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
2531 req->e.swqe->psn, req->e.swqe->lpsn,
2533 trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
2535 /* Release the tid resources */
2536 hfi1_kern_exp_rcv_clear(req);
2538 if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd))
2541 /* If not done yet, build next read request */
2542 if (++req->comp_seg >= req->total_segs) {
2544 req->state = TID_REQUEST_COMPLETE;
2548 * Clear the hw flow under two conditions:
2549 * 1. This request is a sync point and it is complete;
2550 * 2. Current request is completed and there are no more requests.
2552 if ((req->state == TID_REQUEST_SYNC &&
2553 req->comp_seg == req->cur_seg) ||
2554 priv->tid_r_comp == priv->tid_r_reqs) {
2555 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2556 priv->s_flags &= ~HFI1_R_TID_SW_PSN;
2557 if (req->state == TID_REQUEST_SYNC)
2558 req->state = TID_REQUEST_ACTIVE;
2561 hfi1_schedule_send(qp);
2566 * The test indicates that the send engine has finished its cleanup
2567 * after sending the request and it's now safe to put the QP into error
2568 * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail
2569 * == qp->s_head), it would be unsafe to complete the wqe pointed by
2570 * qp->s_acked here. Putting the qp into error state will safely flush
2571 * all remaining requests.
2573 if (qp->s_last == qp->s_acked)
2574 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2577 spin_unlock_irqrestore(&qp->s_lock, flags);
2580 void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
2581 __must_hold(&qp->s_lock)
2583 u32 n = qp->s_acked;
2584 struct rvt_swqe *wqe;
2585 struct tid_rdma_request *req;
2586 struct hfi1_qp_priv *priv = qp->priv;
2588 lockdep_assert_held(&qp->s_lock);
2589 /* Free any TID entries */
2590 while (n != qp->s_tail) {
2591 wqe = rvt_get_swqe_ptr(qp, n);
2592 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2593 req = wqe_to_tid_req(wqe);
2594 hfi1_kern_exp_rcv_clear_all(req);
2597 if (++n == qp->s_size)
2601 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2604 static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
2605 struct hfi1_packet *packet, u8 rcv_type,
2608 struct rvt_qp *qp = packet->qp;
2609 struct hfi1_qp_priv *qpriv = qp->priv;
2611 struct ib_other_headers *ohdr = packet->ohdr;
2612 struct rvt_ack_entry *e;
2613 struct tid_rdma_request *req;
2614 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2617 if (rcv_type >= RHF_RCV_TYPE_IB)
2620 spin_lock(&qp->s_lock);
2623 * We've run out of space in the eager buffer.
2624 * Eagerly received KDETH packets that require space in the
2625 * eager buffer (packets that have a payload) are TID RDMA WRITE
2626 * response packets. In this case, we have to re-transmit the
2627 * TID RDMA WRITE request.
2629 if (rcv_type == RHF_RCV_TYPE_EAGER) {
2630 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
2631 hfi1_schedule_send(qp);
2636 * For TID READ response, error out QP after freeing the tid
2639 if (opcode == TID_OP(READ_RESP)) {
2640 ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
2641 if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
2642 cmp_psn(ipsn, qp->s_psn) < 0) {
2643 hfi1_kern_read_tid_flow_free(qp);
2644 spin_unlock(&qp->s_lock);
2645 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2652 * Error out the qp for TID RDMA WRITE
2654 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
2655 for (i = 0; i < rvt_max_atomic(rdi); i++) {
2656 e = &qp->s_ack_queue[i];
2657 if (e->opcode == TID_OP(WRITE_REQ)) {
2658 req = ack_to_tid_req(e);
2659 hfi1_kern_exp_rcv_clear_all(req);
2662 spin_unlock(&qp->s_lock);
2663 rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
2667 spin_unlock(&qp->s_lock);
2672 static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd,
2673 struct rvt_qp *qp, struct rvt_swqe *wqe)
2675 struct tid_rdma_request *req;
2676 struct tid_rdma_flow *flow;
2678 /* Start from the right segment */
2679 qp->r_flags |= RVT_R_RDMAR_SEQ;
2680 req = wqe_to_tid_req(wqe);
2681 flow = &req->flows[req->clear_tail];
2682 hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0);
2683 if (list_empty(&qp->rspwait)) {
2684 qp->r_flags |= RVT_R_RSP_SEND;
2686 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2691 * Handle the KDETH eflags for TID RDMA READ response.
2693 * Return true if the last packet for a segment has been received and it is
2694 * time to process the response normally; otherwise, return false.
2696 * The caller must hold the packet->qp->r_lock and the rcu_read_lock.
2698 static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2699 struct hfi1_packet *packet, u8 rcv_type,
2700 u8 rte, u32 psn, u32 ibpsn)
2701 __must_hold(&packet->qp->r_lock) __must_hold(RCU)
2703 struct hfi1_pportdata *ppd = rcd->ppd;
2704 struct hfi1_devdata *dd = ppd->dd;
2705 struct hfi1_ibport *ibp;
2706 struct rvt_swqe *wqe;
2707 struct tid_rdma_request *req;
2708 struct tid_rdma_flow *flow;
2710 struct rvt_qp *qp = packet->qp;
2711 struct hfi1_qp_priv *priv = qp->priv;
2716 lockdep_assert_held(&qp->r_lock);
2717 /* If the psn is out of valid range, drop the packet */
2718 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
2719 cmp_psn(ibpsn, qp->s_psn) > 0)
2722 spin_lock(&qp->s_lock);
2724 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
2725 * requests and implicitly NAK RDMA read and atomic requests issued
2726 * before the NAK'ed request.
2728 ack_psn = ibpsn - 1;
2729 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2730 ibp = to_iport(qp->ibqp.device, qp->port_num);
2732 /* Complete WQEs that the PSN finishes. */
2733 while ((int)delta_psn(ack_psn, wqe->lpsn) >= 0) {
2735 * If this request is a RDMA read or atomic, and the NACK is
2736 * for a later operation, this NACK NAKs the RDMA read or
2739 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
2740 wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
2741 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2742 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2743 /* Retry this request. */
2744 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
2745 qp->r_flags |= RVT_R_RDMAR_SEQ;
2746 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2747 restart_tid_rdma_read_req(rcd, qp,
2750 hfi1_restart_rc(qp, qp->s_last_psn + 1,
2752 if (list_empty(&qp->rspwait)) {
2753 qp->r_flags |= RVT_R_RSP_SEND;
2755 list_add_tail(/* wait */
2757 &rcd->qp_wait_list);
2762 * No need to process the NAK since we are
2763 * restarting an earlier request.
2768 wqe = do_rc_completion(qp, wqe, ibp);
2769 if (qp->s_acked == qp->s_tail)
2773 /* Handle the eflags for the request */
2774 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
2777 req = wqe_to_tid_req(wqe);
2779 case RHF_RCV_TYPE_EXPECTED:
2781 case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
2783 * On the first occurrence of a Flow Sequence error,
2784 * the flag HFI1_R_TID_SW_PSN is set.
2786 * After that, the flow is *not* reprogrammed and the
2787 * protocol falls back to SW PSN checking. This is done
2788 * to prevent continuous Flow Sequence errors for any
2789 * packets that could still be in the fabric.
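* (Once HFI1_R_TID_SW_PSN is set, the remaining packets of the
* segment are sequence-checked in software against
* flow_state.r_next_psn instead of by the hardware flow, as the
* code below does.)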
2791 flow = find_flow(req, psn, NULL);
2794 * We can't find the IB PSN matching the
2795 * received KDETH PSN. The only thing we can
2796 * do at this point is report the error to
2799 hfi1_kern_read_tid_flow_free(qp);
2800 spin_unlock(&qp->s_lock);
2801 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2804 if (priv->s_flags & HFI1_R_TID_SW_PSN) {
2806 flow->flow_state.r_next_psn);
2808 if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
2809 restart_tid_rdma_read_req(rcd,
2813 /* Drop the packet.*/
2815 } else if (diff < 0) {
2817 * If a response packet for a restarted
2818 * request has come back, reset the
2821 if (qp->r_flags & RVT_R_RDMAR_SEQ)
2825 /* Drop the packet.*/
2830 * If SW PSN verification is successful and
2831 * this is the last packet in the segment, tell
2832 * the caller to process it as a normal packet.
2834 fpsn = full_flow_psn(flow,
2835 flow->flow_state.lpsn);
2836 if (cmp_psn(fpsn, psn) == 0) {
2838 if (qp->r_flags & RVT_R_RDMAR_SEQ)
2842 flow->flow_state.r_next_psn =
2847 last_psn = read_r_next_psn(dd, rcd->ctxt,
2849 flow->flow_state.r_next_psn = last_psn;
2850 priv->s_flags |= HFI1_R_TID_SW_PSN;
2852 * If no request has been restarted yet,
2853 * restart the current one.
2855 if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
2856 restart_tid_rdma_read_req(rcd, qp,
2862 case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
2864 * Since the TID flow is able to ride through
2865 * generation mismatch, drop this stale packet.
2874 case RHF_RCV_TYPE_ERROR:
2876 case RHF_RTE_ERROR_OP_CODE_ERR:
2877 case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
2878 case RHF_RTE_ERROR_KHDR_HCRC_ERR:
2879 case RHF_RTE_ERROR_KHDR_KVER_ERR:
2880 case RHF_RTE_ERROR_CONTEXT_ERR:
2881 case RHF_RTE_ERROR_KHDR_TID_ERR:
2889 spin_unlock(&qp->s_lock);
2893 bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2894 struct hfi1_pportdata *ppd,
2895 struct hfi1_packet *packet)
2897 struct hfi1_ibport *ibp = &ppd->ibport_data;
2898 struct hfi1_devdata *dd = ppd->dd;
2899 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
2900 u8 rcv_type = rhf_rcv_type(packet->rhf);
2901 u8 rte = rhf_rcv_type_err(packet->rhf);
2902 struct ib_header *hdr = packet->hdr;
2903 struct ib_other_headers *ohdr = NULL;
2904 int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
2905 u16 lid = be16_to_cpu(hdr->lrh[1]);
2907 u32 qp_num, psn, ibpsn;
2909 struct hfi1_qp_priv *qpriv;
2910 unsigned long flags;
2912 struct rvt_ack_entry *e;
2913 struct tid_rdma_request *req;
2914 struct tid_rdma_flow *flow;
2917 trace_hfi1_msg_handle_kdeth_eflags(NULL, "Kdeth error: rhf ",
2919 if (packet->rhf & RHF_ICRC_ERR)
2922 packet->ohdr = &hdr->u.oth;
2923 ohdr = packet->ohdr;
2924 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
2926 /* Get the destination QP number. */
2927 qp_num = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_qp) &
2929 if (lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
2932 psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
2933 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
2936 qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
2942 /* Check for valid receive state. */
2943 spin_lock_irqsave(&qp->r_lock, flags);
2944 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2945 ibp->rvp.n_pkt_drops++;
2949 if (packet->rhf & RHF_TID_ERR) {
2950 /* For TIDERR and RC QPs preemptively schedule a NAK */
2951 u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
2953 /* Sanity check packet */
2958 * Check for GRH. We should never get packets with GRH in this
2961 if (lnh == HFI1_LRH_GRH)
2964 if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
2968 /* handle TID RDMA READ */
2969 if (opcode == TID_OP(READ_RESP)) {
2970 ibpsn = be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn);
2971 ibpsn = mask_psn(ibpsn);
2972 ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn,
2978 * qp->s_tail_ack_queue points to the rvt_ack_entry currently being
2979 * processed. These are completed sequentially, so we can be sure that
2980 * the pointer will not change until the entire request has completed.
2982 spin_lock(&qp->s_lock);
2984 e = &qp->s_ack_queue[qpriv->r_tid_tail];
2985 req = ack_to_tid_req(e);
2986 flow = &req->flows[req->clear_tail];
2987 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
2988 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
2989 trace_hfi1_tid_write_rsp_handle_kdeth_eflags(qp);
2990 trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn,
2992 trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);
2995 case RHF_RCV_TYPE_EXPECTED:
2997 case RHF_RTE_EXPECTED_FLOW_SEQ_ERR:
2998 if (!(qpriv->s_flags & HFI1_R_TID_SW_PSN)) {
2999 qpriv->s_flags |= HFI1_R_TID_SW_PSN;
3000 flow->flow_state.r_next_psn =
3001 read_r_next_psn(dd, rcd->ctxt,
3003 qpriv->r_next_psn_kdeth =
3004 flow->flow_state.r_next_psn;
3008 * If the received PSN does not match the next
3009 * expected PSN, NAK the packet.
3010 * However, only do that if we know that a
3011 * NAK has already been sent. Otherwise, this
3012 * mismatch could be due to packets that were
3013 * already in flight.
3016 flow->flow_state.r_next_psn);
3022 qpriv->s_nak_state = 0;
3024 * If SW PSN verification is successful and this
3025 * is the last packet in the segment, tell the
3026 * caller to process it as a normal packet.
3028 if (psn == full_flow_psn(flow,
3029 flow->flow_state.lpsn))
3031 flow->flow_state.r_next_psn =
3033 qpriv->r_next_psn_kdeth =
3034 flow->flow_state.r_next_psn;
3038 case RHF_RTE_EXPECTED_FLOW_GEN_ERR:
3046 case RHF_RCV_TYPE_ERROR:
3048 case RHF_RTE_ERROR_OP_CODE_ERR:
3049 case RHF_RTE_ERROR_KHDR_MIN_LEN_ERR:
3050 case RHF_RTE_ERROR_KHDR_HCRC_ERR:
3051 case RHF_RTE_ERROR_KHDR_KVER_ERR:
3052 case RHF_RTE_ERROR_CONTEXT_ERR:
3053 case RHF_RTE_ERROR_KHDR_TID_ERR:
3062 spin_unlock(&qp->s_lock);
3064 spin_unlock_irqrestore(&qp->r_lock, flags);
3070 ibp->rvp.n_rc_seqnak++;
3071 if (!qpriv->s_nak_state) {
3072 qpriv->s_nak_state = IB_NAK_PSN_ERROR;
3073 /* We are NAK'ing the next expected PSN */
3074 qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
3075 qpriv->s_flags |= RVT_S_ACK_PENDING;
3076 if (qpriv->r_tid_ack == HFI1_QP_WQE_INVALID)
3077 qpriv->r_tid_ack = qpriv->r_tid_tail;
3078 hfi1_schedule_tid_send(qp);
3084 * "Rewind" the TID request information.
3085 * This means that we reset the state back to ACTIVE,
3086 * find the proper flow, set the flow index to that flow,
3087 * and reset the flow information.
3089 void hfi1_tid_rdma_restart_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
3092 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3093 struct tid_rdma_flow *flow;
3094 struct hfi1_qp_priv *qpriv = qp->priv;
3095 int diff, delta_pkts;
3099 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
3100 *bth2 = mask_psn(qp->s_psn);
3101 flow = find_flow_ib(req, *bth2, &fidx);
3103 trace_hfi1_msg_tid_restart_req(/* msg */
3104 qp, "!!!!!! Could not find flow to restart: bth2 ",
3106 trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode,
3107 wqe->psn, wqe->lpsn,
3112 fidx = req->acked_tail;
3113 flow = &req->flows[fidx];
3114 *bth2 = mask_psn(req->r_ack_psn);
3117 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
3118 delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn);
3120 delta_pkts = delta_psn(*bth2,
3122 flow->flow_state.spsn));
3124 trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3125 diff = delta_pkts + flow->resync_npkts;
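/*
 * diff is the number of packets from the start of this flow up to
 * the restart point, including packets re-covered after a RESYNC.
 * The loop below walks the flow's TID entries to convert that
 * packet count back into a (tid_idx, tid_offset) position.
 */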
3130 flow->tid_offset = 0;
3132 for (tididx = 0; tididx < flow->tidcnt; tididx++) {
3133 u32 tidentry = flow->tid_entry[tididx], tidlen,
3136 flow->tid_offset = 0;
3137 tidlen = EXP_TID_GET(tidentry, LEN) * PAGE_SIZE;
3138 tidnpkts = rvt_div_round_up_mtu(qp, tidlen);
3139 npkts = min_t(u32, diff, tidnpkts);
3141 flow->sent += (npkts == tidnpkts ? tidlen :
3143 flow->tid_offset += npkts * qp->pmtu;
3149 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
3150 rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) +
3153 * Packet PSN is based on flow_state.spsn + flow->pkt. However,
3154 * during a RESYNC, the generation is incremented and the
3155 * sequence is reset to 0. Since we've adjusted the npkts in the
3156 * flow and the SGE has been sufficiently advanced, we have to
3157 * adjust flow->pkt in order to calculate the correct PSN.
3159 flow->pkt -= flow->resync_npkts;
3162 if (flow->tid_offset ==
3163 EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) {
3165 flow->tid_offset = 0;
3167 flow->tid_idx = tididx;
3168 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
3169 /* Move flow_idx to correct index */
3170 req->flow_idx = fidx;
3172 req->clear_tail = fidx;
3174 trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3175 trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn,
3177 req->state = TID_REQUEST_ACTIVE;
3178 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
3179 /* Reset all the flows that we are going to resend */
3180 fidx = CIRC_NEXT(fidx, MAX_FLOWS);
3181 i = qpriv->s_tid_tail;
3183 for (; CIRC_CNT(req->setup_head, fidx, MAX_FLOWS);
3184 fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
3185 req->flows[fidx].sent = 0;
3186 req->flows[fidx].pkt = 0;
3187 req->flows[fidx].tid_idx = 0;
3188 req->flows[fidx].tid_offset = 0;
3189 req->flows[fidx].resync_npkts = 0;
3191 if (i == qpriv->s_tid_cur)
3194 i = (++i == qp->s_size ? 0 : i);
3195 wqe = rvt_get_swqe_ptr(qp, i);
3196 } while (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE);
3197 req = wqe_to_tid_req(wqe);
3198 req->cur_seg = req->ack_seg;
3199 fidx = req->acked_tail;
3200 /* Pull req->clear_tail back */
3201 req->clear_tail = fidx;
3206 void hfi1_qp_kern_exp_rcv_clear_all(struct rvt_qp *qp)
3209 struct hfi1_qp_priv *qpriv = qp->priv;
3210 struct tid_flow_state *fs;
3212 if (qp->ibqp.qp_type != IB_QPT_RC || !HFI1_CAP_IS_KSET(TID_RDMA))
3216 * First, clear the flow to help prevent any delayed packets from
3219 fs = &qpriv->flow_state;
3220 if (fs->index != RXE_NUM_TID_FLOWS)
3221 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
3223 for (i = qp->s_acked; i != qp->s_head;) {
3224 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, i);
3226 if (++i == qp->s_size)
3228 /* Free only locally allocated TID entries */
3229 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
3232 struct hfi1_swqe_priv *priv = wqe->priv;
3234 ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
3237 for (i = qp->s_acked_ack_queue; i != qp->r_head_ack_queue;) {
3238 struct rvt_ack_entry *e = &qp->s_ack_queue[i];
3240 if (++i == rvt_max_atomic(ib_to_rvt(qp->ibqp.device)))
3242 /* Free only locally allocated TID entries */
3243 if (e->opcode != TID_OP(WRITE_REQ))
3246 struct hfi1_ack_priv *priv = e->priv;
3248 ret = hfi1_kern_exp_rcv_clear(&priv->tid_req);
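/*
 * A sketch of the interlock rule implemented below: a request that
 * follows a TID RDMA WRITE must wait until that write has no unacked
 * segments (req->ack_seg != req->total_segs), and a TID RDMA READ
 * additionally waits for a preceding plain RDMA READ to be fully
 * acked (qp->s_acked != qp->s_cur). When a wait is needed, the
 * function sets HFI1_S_TID_WAIT_INTERLCK and returns true.
 */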
3253 bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
3255 struct rvt_swqe *prev;
3256 struct hfi1_qp_priv *priv = qp->priv;
3258 struct tid_rdma_request *req;
3260 s_prev = (qp->s_cur == 0 ? qp->s_size : qp->s_cur) - 1;
3261 prev = rvt_get_swqe_ptr(qp, s_prev);
3263 switch (wqe->wr.opcode) {
3265 case IB_WR_SEND_WITH_IMM:
3266 case IB_WR_SEND_WITH_INV:
3267 case IB_WR_ATOMIC_CMP_AND_SWP:
3268 case IB_WR_ATOMIC_FETCH_AND_ADD:
3269 case IB_WR_RDMA_WRITE:
3270 switch (prev->wr.opcode) {
3271 case IB_WR_TID_RDMA_WRITE:
3272 req = wqe_to_tid_req(prev);
3273 if (req->ack_seg != req->total_segs)
3279 case IB_WR_RDMA_READ:
3280 if (prev->wr.opcode != IB_WR_TID_RDMA_WRITE)
3283 case IB_WR_TID_RDMA_READ:
3284 switch (prev->wr.opcode) {
3285 case IB_WR_RDMA_READ:
3286 if (qp->s_acked != qp->s_cur)
3289 case IB_WR_TID_RDMA_WRITE:
3290 req = wqe_to_tid_req(prev);
3291 if (req->ack_seg != req->total_segs)
3302 priv->s_flags |= HFI1_S_TID_WAIT_INTERLCK;
3306 /* Does @sge meet the alignment requirements for tid rdma? */
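/*
 * For example, assuming 4 KiB pages: an SGE with vaddr 0x10000 and
 * length 8192 passes, while vaddr 0x10800 (not page aligned) or
 * length 6144 (not a page multiple) fails.
 */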
3307 static inline bool hfi1_check_sge_align(struct rvt_qp *qp,
3308 struct rvt_sge *sge, int num_sge)
3312 for (i = 0; i < num_sge; i++, sge++) {
3313 trace_hfi1_sge_check_align(qp, i, sge);
3314 if ((u64)sge->vaddr & ~PAGE_MASK ||
3315 sge->sge_length & ~PAGE_MASK)
3321 void setup_tid_rdma_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
3323 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
3324 struct hfi1_swqe_priv *priv = wqe->priv;
3325 struct tid_rdma_params *remote;
3326 enum ib_wr_opcode new_opcode;
3327 bool do_tid_rdma = false;
3328 struct hfi1_pportdata *ppd = qpriv->rcd->ppd;
3330 if ((rdma_ah_get_dlid(&qp->remote_ah_attr) & ~((1 << ppd->lmc) - 1)) ==
3333 if (qpriv->hdr_type != HFI1_PKT_TYPE_9B)
3337 remote = rcu_dereference(qpriv->tid_rdma.remote);
3339 * If TID RDMA is disabled by the negotiation, don't
3345 if (wqe->wr.opcode == IB_WR_RDMA_READ) {
3346 if (hfi1_check_sge_align(qp, &wqe->sg_list[0],
3348 new_opcode = IB_WR_TID_RDMA_READ;
3351 } else if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
3353 * TID RDMA is enabled for this RDMA WRITE request iff:
3354 * 1. The remote address is page-aligned,
3355 * 2. The length is larger than the minimum segment size,
3356 * 3. The length is a multiple of the page size.
3358 if (!(wqe->rdma_wr.remote_addr & ~PAGE_MASK) &&
3359 !(wqe->length & ~PAGE_MASK)) {
3360 new_opcode = IB_WR_TID_RDMA_WRITE;
3366 if (hfi1_kern_exp_rcv_alloc_flows(&priv->tid_req, GFP_ATOMIC))
3368 wqe->wr.opcode = new_opcode;
3369 priv->tid_req.seg_len =
3370 min_t(u32, remote->max_len, wqe->length);
3371 priv->tid_req.total_segs =
3372 DIV_ROUND_UP(wqe->length, priv->tid_req.seg_len);
3373 /* Compute the last PSN of the request */
3374 wqe->lpsn = wqe->psn;
3375 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
3376 priv->tid_req.n_flows = remote->max_read;
3377 qpriv->tid_r_reqs++;
3378 wqe->lpsn += rvt_div_round_up_mtu(qp, wqe->length) - 1;
3380 wqe->lpsn += priv->tid_req.total_segs - 1;
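/*
 * Illustrative numbers, assuming a 4 KiB PMTU and a negotiated
 * max_len of 256 KiB: an 8 MiB TID RDMA READ spans 2048 PSNs (one
 * per packet), while the same TID RDMA WRITE spans only 32 PSNs
 * (one per segment).
 */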
3381 atomic_inc(&qpriv->n_requests);
3384 priv->tid_req.cur_seg = 0;
3385 priv->tid_req.comp_seg = 0;
3386 priv->tid_req.ack_seg = 0;
3387 priv->tid_req.state = TID_REQUEST_INACTIVE;
3390 * TID RDMA READ does not have ACKs so it does not
3391 * update the pointer. We have to reset it so TID RDMA
3392 * WRITE does not get confused.
3394 priv->tid_req.acked_tail = priv->tid_req.setup_head;
3395 trace_hfi1_tid_req_setup_tid_wqe(qp, 1, wqe->wr.opcode,
3396 wqe->psn, wqe->lpsn,
3403 /* TID RDMA WRITE functions */
3405 u32 hfi1_build_tid_rdma_write_req(struct rvt_qp *qp, struct rvt_swqe *wqe,
3406 struct ib_other_headers *ohdr,
3407 u32 *bth1, u32 *bth2, u32 *len)
3409 struct hfi1_qp_priv *qpriv = qp->priv;
3410 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3411 struct tid_rdma_params *remote;
3414 remote = rcu_dereference(qpriv->tid_rdma.remote);
3416 * Set the number of flows to be used based on negotiated
3419 req->n_flows = remote->max_write;
3420 req->state = TID_REQUEST_ACTIVE;
3422 KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth0, KVER, 0x1);
3423 KDETH_RESET(ohdr->u.tid_rdma.w_req.kdeth1, JKEY, remote->jkey);
3424 ohdr->u.tid_rdma.w_req.reth.vaddr =
3425 cpu_to_be64(wqe->rdma_wr.remote_addr + (wqe->length - *len));
3426 ohdr->u.tid_rdma.w_req.reth.rkey =
3427 cpu_to_be32(wqe->rdma_wr.rkey);
3428 ohdr->u.tid_rdma.w_req.reth.length = cpu_to_be32(*len);
3429 ohdr->u.tid_rdma.w_req.verbs_qp = cpu_to_be32(qp->remote_qpn);
3430 *bth1 &= ~RVT_QPN_MASK;
3431 *bth1 |= remote->qp;
3432 qp->s_state = TID_OP(WRITE_REQ);
3433 qp->s_flags |= HFI1_S_WAIT_TID_RESP;
3434 *bth2 |= IB_BTH_REQ_ACK;
3438 return sizeof(ohdr->u.tid_rdma.w_req) / sizeof(u32);
3441 void hfi1_compute_tid_rdma_flow_wt(void)
3444 * Heuristic for computing the RNR timeout when waiting on the flow
3445 * queue. Rather than a computationally expensive exact estimate of when
3446 * a flow will be available, we assume that if a QP is at position N in
3447 * the flow queue it has to wait approximately (N + 1) * (number of
3448 * segments between two sync points), assuming PMTU of 4K. The rationale
3449 * for this is that flows are released and recycled at each sync point.
3451 tid_rdma_flow_wt = MAX_TID_FLOW_PSN * enum_to_mtu(OPA_MTU_4096) /
3452 TID_RDMA_MAX_SEGMENT_SIZE;
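/*
 * Rough numbers, assuming HFI1_KDETH_BTH_SEQ_SHIFT is 11 and
 * TID_RDMA_MAX_SEGMENT_SIZE is 256 KiB: 2048 PSNs * 4096 B / 256 KiB
 * yields a weight of 32 segments per position in the flow queue.
 */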
3455 static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
3456 struct tid_queue *queue)
3458 return qpriv->tid_enqueue - queue->dequeue;
3462 * @qp: points to rvt_qp context.
3463 * @to_seg: desired RNR timeout in segments.
3464 * Return: index of the next highest timeout in the ib_hfi1_rnr_table[]
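* For example, a computed wire time of 300 us selects the first
* entry in the standard IB RNR table that is at least 300 us,
* i.e. 0.32 ms.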
3466 static u32 hfi1_compute_tid_rnr_timeout(struct rvt_qp *qp, u32 to_seg)
3468 struct hfi1_qp_priv *qpriv = qp->priv;
3473 bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8;
3474 timeout = (to_seg * TID_RDMA_MAX_SEGMENT_SIZE) / bytes_per_us;
3476 * Find the next highest value in the RNR table to the required
3477 * timeout. This gives the responder some padding.
3479 for (i = 1; i <= IB_AETH_CREDIT_MASK; i++)
3480 if (rvt_rnr_tbl_to_usec(i) >= timeout)
3486 * Central place for resource allocation at TID write responder,
3487 * is called from write_req and write_data interrupt handlers as
3488 * well as the send thread when a queued QP is scheduled for
3489 * resource allocation.
3491 * Iterates over (a) segments of a request and then (b) queued requests
3492 * themselves to allocate resources for up to local->max_write
3493 * segments across multiple requests. Stop allocating when we
3494 * hit a sync point; resume allocating after the data packets at
3495 * the sync point have been received.
3497 * Resource allocation and sending of responses is decoupled. The
3498 * request/segment being allocated and the one being sent are as follows:
3499 * Resources are allocated for:
3500 * [request: qpriv->r_tid_alloc, segment: req->alloc_seg]
3501 * The send thread sends:
3502 * [request: qp->s_tail_ack_queue, segment:req->cur_seg]
3504 static void hfi1_tid_write_alloc_resources(struct rvt_qp *qp, bool intr_ctx)
3506 struct tid_rdma_request *req;
3507 struct hfi1_qp_priv *qpriv = qp->priv;
3508 struct hfi1_ctxtdata *rcd = qpriv->rcd;
3509 struct tid_rdma_params *local = &qpriv->tid_rdma.local;
3510 struct rvt_ack_entry *e;
3515 lockdep_assert_held(&qp->s_lock);
3518 trace_hfi1_rsp_tid_write_alloc_res(qp, 0);
3519 trace_hfi1_tid_write_rsp_alloc_res(qp);
3521 * Don't allocate more segments if a RNR NAK has already been
3522 * scheduled to avoid messing up qp->r_psn: the RNR NAK will
3523 * be sent only when all allocated segments have been sent.
3524 * However, if more segments are allocated before that, TID RDMA
3525 * WRITE RESP packets will be sent out for these new segments
3526 * before the RNR NAK packet. When the requester receives the
3527 * RNR NAK packet, it will restart with qp->s_last_psn + 1,
3528 * which does not match qp->r_psn and will be dropped.
3529 * Consequently, the requester will exhaust its retries and
3530 * put the qp into error state.
3532 if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND)
3535 /* No requests left to process */
3536 if (qpriv->r_tid_alloc == qpriv->r_tid_head) {
3537 /* If all data has been received, clear the flow */
3538 if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS &&
3539 !qpriv->alloc_w_segs) {
3540 hfi1_kern_clear_hw_flow(rcd, qp);
3541 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
3546 e = &qp->s_ack_queue[qpriv->r_tid_alloc];
3547 if (e->opcode != TID_OP(WRITE_REQ))
3549 req = ack_to_tid_req(e);
3550 trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn,
3552 /* Finished allocating for all segments of this request */
3553 if (req->alloc_seg >= req->total_segs)
3556 /* Can allocate only a maximum of local->max_write for a QP */
3557 if (qpriv->alloc_w_segs >= local->max_write)
3560 /* Don't allocate at a sync point with data packets pending */
3561 if (qpriv->sync_pt && qpriv->alloc_w_segs)
3564 /* All data received at the sync point, continue */
3565 if (qpriv->sync_pt && !qpriv->alloc_w_segs) {
3566 hfi1_kern_clear_hw_flow(rcd, qp);
3567 qpriv->sync_pt = false;
3568 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
3571 /* Allocate flow if we don't have one */
3572 if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
3573 ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
3575 to_seg = tid_rdma_flow_wt *
3576 position_in_queue(qpriv,
3582 npkts = rvt_div_round_up_mtu(qp, req->seg_len);
3585 * We are at a sync point if we run out of KDETH PSN space.
3586 * The last PSN of every generation is reserved for RESYNC.
3588 if (qpriv->flow_state.psn + npkts > MAX_TID_FLOW_PSN - 1) {
3589 qpriv->sync_pt = true;
3594 * If overtaking req->acked_tail, send an RNR NAK. Because the
3595 * QP is not queued in this case, and the issue can only be
3596 * caused by a delay in scheduling the second leg, which we
3597 * cannot estimate, we use a rather arbitrary RNR timeout of
3598 * (MAX_FLOWS / 2) segments
3600 if (!CIRC_SPACE(req->setup_head, req->acked_tail,
3603 to_seg = MAX_FLOWS >> 1;
3604 qpriv->s_flags |= RVT_S_ACK_PENDING;
3605 hfi1_schedule_tid_send(qp);
3609 /* Try to allocate rcv array / TID entries */
3610 ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last);
3612 to_seg = position_in_queue(qpriv, &rcd->rarr_queue);
3616 qpriv->alloc_w_segs++;
3620 /* Begin processing the next request */
3621 if (++qpriv->r_tid_alloc >
3622 rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3623 qpriv->r_tid_alloc = 0;
3627 * Schedule an RNR NAK to be sent if (a) flow or rcv array allocation
3628 * has failed, (b) we are called from the rcv handler interrupt context,
3629 * and (c) an RNR NAK has not already been scheduled
3631 if (ret == -EAGAIN && intr_ctx && !qp->r_nak_state)
3637 lockdep_assert_held(&qp->r_lock);
3639 /* Set r_nak_state to prevent unrelated events from generating NAK's */
3640 qp->r_nak_state = hfi1_compute_tid_rnr_timeout(qp, to_seg) | IB_RNR_NAK;
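/*
 * The low five bits of r_nak_state carry the RNR timer index
 * computed above; IB_RNR_NAK marks the AETH as an RNR NAK rather
 * than a PSN NAK.
 */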
3642 /* Pull back r_psn to the segment being RNR NAK'd */
3643 qp->r_psn = e->psn + req->alloc_seg;
3644 qp->r_ack_psn = qp->r_psn;
3646 * Pull back r_head_ack_queue to the ack entry following the request
3647 * being RNR NAK'd. This allows resources to be allocated to the request
3648 * if the queued QP is scheduled.
3650 qp->r_head_ack_queue = qpriv->r_tid_alloc + 1;
3651 if (qp->r_head_ack_queue > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3652 qp->r_head_ack_queue = 0;
3653 qpriv->r_tid_head = qp->r_head_ack_queue;
3655 * These send side fields are used in make_rc_ack(). They are set in
3656 * hfi1_send_rc_ack() but must be set here before dropping qp->s_lock
3659 qp->s_nak_state = qp->r_nak_state;
3660 qp->s_ack_psn = qp->r_ack_psn;
3662 * Clear the ACK PENDING flag to prevent unwanted ACK because we
3663 * have modified qp->s_ack_psn here.
3665 qp->s_flags &= ~(RVT_S_ACK_PENDING);
3667 trace_hfi1_rsp_tid_write_alloc_res(qp, qp->r_psn);
3669 * qpriv->rnr_nak_state is used to determine when the scheduled RNR NAK
3670 * has actually been sent. The RVT_S_ACK_PENDING bit in qp->s_flags cannot
3671 * be used for this because qp->s_lock is dropped before calling
3672 * hfi1_send_rc_ack(), leading to inconsistency between the receive
3673 * interrupt handlers and the send thread in make_rc_ack()
3675 qpriv->rnr_nak_state = TID_RNR_NAK_SEND;
3678 * Schedule RNR NAK to be sent. RNR NAK's are scheduled from the receive
3679 * interrupt handlers but will be sent from the send engine behind any
3680 * previous responses that may have been scheduled
3682 rc_defered_ack(rcd, qp);
3685 void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
3687 /* HANDLER FOR TID RDMA WRITE REQUEST packet (Responder side) */
3690 * 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST
3691 * (see hfi1_rc_rcv())
3692 * - Don't allow 0-length requests.
3693 * 2. Put TID RDMA WRITE REQ into the response queue (s_ack_queue)
3694 * - Setup struct tid_rdma_req with request info
3695 * - Prepare struct tid_rdma_flow array?
3696 * 3. Set the qp->s_ack_state as per the state diagram in the design doc.
3697 * 4. Set RVT_S_RESP_PENDING in s_flags.
3698 * 5. Kick the send engine (hfi1_schedule_send())
3700 struct hfi1_ctxtdata *rcd = packet->rcd;
3701 struct rvt_qp *qp = packet->qp;
3702 struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
3703 struct ib_other_headers *ohdr = packet->ohdr;
3704 struct rvt_ack_entry *e;
3705 unsigned long flags;
3706 struct ib_reth *reth;
3707 struct hfi1_qp_priv *qpriv = qp->priv;
3708 struct tid_rdma_request *req;
3709 u32 bth0, psn, len, rkey, num_segs;
3715 bth0 = be32_to_cpu(ohdr->bth[0]);
3716 if (hfi1_ruc_check_hdr(ibp, packet))
3719 fecn = process_ecn(qp, packet);
3720 psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
3721 trace_hfi1_rsp_rcv_tid_write_req(qp, psn);
3723 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
3726 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3729 reth = &ohdr->u.tid_rdma.w_req.reth;
3730 vaddr = be64_to_cpu(reth->vaddr);
3731 len = be32_to_cpu(reth->length);
3733 num_segs = DIV_ROUND_UP(len, qpriv->tid_rdma.local.max_len);
3734 diff = delta_psn(psn, qp->r_psn);
3735 if (unlikely(diff)) {
3736 tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
3741 * The resent request which was previously RNR NAK'd is inserted at the
3742 * location of the original request, which is one entry behind
3745 if (qpriv->rnr_nak_state)
3746 qp->r_head_ack_queue = qp->r_head_ack_queue ?
3747 qp->r_head_ack_queue - 1 :
3748 rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
3750 /* We've verified the request, insert it into the ack queue. */
3751 next = qp->r_head_ack_queue + 1;
3752 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3754 spin_lock_irqsave(&qp->s_lock, flags);
3755 if (unlikely(next == qp->s_acked_ack_queue)) {
3756 if (!qp->s_ack_queue[next].sent)
3757 goto nack_inv_unlock;
3758 update_ack_queue(qp, next);
3760 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3761 req = ack_to_tid_req(e);
3763 /* Bring previously RNR NAK'd request back to life */
3764 if (qpriv->rnr_nak_state) {
3765 qp->r_nak_state = 0;
3766 qp->s_nak_state = 0;
3767 qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
3768 qp->r_psn = e->lpsn + 1;
3769 req->state = TID_REQUEST_INIT;
3773 release_rdma_sge_mr(e);
3775 /* The length needs to be in multiples of PAGE_SIZE */
3776 if (!len || len & ~PAGE_MASK)
3777 goto nack_inv_unlock;
3779 rkey = be32_to_cpu(reth->rkey);
3782 if (e->opcode == TID_OP(WRITE_REQ) &&
3783 (req->setup_head != req->clear_tail ||
3784 req->clear_tail != req->acked_tail))
3785 goto nack_inv_unlock;
3787 if (unlikely(!rvt_rkey_ok(qp, &e->rdma_sge, qp->r_len, vaddr,
3788 rkey, IB_ACCESS_REMOTE_WRITE)))
3791 qp->r_psn += num_segs - 1;
3793 e->opcode = (bth0 >> 24) & 0xff;
3795 e->lpsn = qp->r_psn;
3798 req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write);
3799 req->state = TID_REQUEST_INIT;
3805 req->seg_len = qpriv->tid_rdma.local.max_len;
3806 req->total_len = len;
3807 req->total_segs = num_segs;
3808 req->r_flow_psn = e->psn;
3809 req->ss.sge = e->rdma_sge;
3810 req->ss.num_sge = 1;
3812 req->flow_idx = req->setup_head;
3813 req->clear_tail = req->setup_head;
3814 req->acked_tail = req->setup_head;
3816 qp->r_state = e->opcode;
3817 qp->r_nak_state = 0;
3819 * We need to increment the MSN here instead of when we
3820 * finish sending the result since a duplicate request would
3821 * increment it more than once.
3826 trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn,
3829 if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID) {
3830 qpriv->r_tid_tail = qp->r_head_ack_queue;
3831 } else if (qpriv->r_tid_tail == qpriv->r_tid_head) {
3832 struct tid_rdma_request *ptr;
3834 e = &qp->s_ack_queue[qpriv->r_tid_tail];
3835 ptr = ack_to_tid_req(e);
3837 if (e->opcode != TID_OP(WRITE_REQ) ||
3838 ptr->comp_seg == ptr->total_segs) {
3839 if (qpriv->r_tid_tail == qpriv->r_tid_ack)
3840 qpriv->r_tid_ack = qp->r_head_ack_queue;
3841 qpriv->r_tid_tail = qp->r_head_ack_queue;
3845 qp->r_head_ack_queue = next;
3846 qpriv->r_tid_head = qp->r_head_ack_queue;
3848 hfi1_tid_write_alloc_resources(qp, true);
3849 trace_hfi1_tid_write_rsp_rcv_req(qp);
3851 /* Schedule the send tasklet. */
3852 qp->s_flags |= RVT_S_RESP_PENDING;
3854 qp->s_flags |= RVT_S_ECN;
3855 hfi1_schedule_send(qp);
3857 spin_unlock_irqrestore(&qp->s_lock, flags);
3861 spin_unlock_irqrestore(&qp->s_lock, flags);
3863 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3864 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
3865 qp->r_ack_psn = qp->r_psn;
3866 /* Queue NAK for later */
3867 rc_defered_ack(rcd, qp);
3870 spin_unlock_irqrestore(&qp->s_lock, flags);
3871 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
3872 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
3873 qp->r_ack_psn = qp->r_psn;
3876 u32 hfi1_build_tid_rdma_write_resp(struct rvt_qp *qp, struct rvt_ack_entry *e,
3877 struct ib_other_headers *ohdr, u32 *bth1,
3879 struct rvt_sge_state **ss)
3881 struct hfi1_ack_priv *epriv = e->priv;
3882 struct tid_rdma_request *req = &epriv->tid_req;
3883 struct hfi1_qp_priv *qpriv = qp->priv;
3884 struct tid_rdma_flow *flow = NULL;
3885 u32 resp_len = 0, hdwords = 0;
3886 void *resp_addr = NULL;
3887 struct tid_rdma_params *remote;
3889 trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn,
3891 trace_hfi1_tid_write_rsp_build_resp(qp);
3892 trace_hfi1_rsp_build_tid_write_resp(qp, bth2);
3893 flow = &req->flows[req->flow_idx];
3894 switch (req->state) {
3897 * Try to allocate resources here in case the QP was queued and was
3898 * later scheduled when resources became available
3900 hfi1_tid_write_alloc_resources(qp, false);
3902 /* We've already sent everything which is ready */
3903 if (req->cur_seg >= req->alloc_seg)
3907 * Resources can be assigned but responses cannot be sent in
3908 * rnr_nak state until the resent request is received
3910 if (qpriv->rnr_nak_state == TID_RNR_NAK_SENT)
3913 req->state = TID_REQUEST_ACTIVE;
3914 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3915 req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
3916 hfi1_add_tid_reap_timer(qp);
3919 case TID_REQUEST_RESEND_ACTIVE:
3920 case TID_REQUEST_RESEND:
3921 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3922 req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
3923 if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS))
3924 req->state = TID_REQUEST_ACTIVE;
3926 hfi1_mod_tid_reap_timer(qp);
3929 flow->flow_state.resp_ib_psn = bth2;
3930 resp_addr = (void *)flow->tid_entry;
3931 resp_len = sizeof(*flow->tid_entry) * flow->tidcnt;
3934 memset(&ohdr->u.tid_rdma.w_rsp, 0, sizeof(ohdr->u.tid_rdma.w_rsp));
3935 epriv->ss.sge.vaddr = resp_addr;
3936 epriv->ss.sge.sge_length = resp_len;
3937 epriv->ss.sge.length = epriv->ss.sge.sge_length;
3939 * We can safely zero these out. Since the first SGE covers the
3940 * entire packet, nothing else should even look at the MR.
3942 epriv->ss.sge.mr = NULL;
3943 epriv->ss.sge.m = 0;
3944 epriv->ss.sge.n = 0;
3946 epriv->ss.sg_list = NULL;
3947 epriv->ss.total_len = epriv->ss.sge.sge_length;
3948 epriv->ss.num_sge = 1;
3951 *len = epriv->ss.total_len;
3953 /* Construct the TID RDMA WRITE RESP packet header */
3955 remote = rcu_dereference(qpriv->tid_rdma.remote);
3957 KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth0, KVER, 0x1);
3958 KDETH_RESET(ohdr->u.tid_rdma.w_rsp.kdeth1, JKEY, remote->jkey);
3959 ohdr->u.tid_rdma.w_rsp.aeth = rvt_compute_aeth(qp);
3960 ohdr->u.tid_rdma.w_rsp.tid_flow_psn =
3961 cpu_to_be32((flow->flow_state.generation <<
3962 HFI1_KDETH_BTH_SEQ_SHIFT) |
3963 (flow->flow_state.spsn &
3964 HFI1_KDETH_BTH_SEQ_MASK));
3965 ohdr->u.tid_rdma.w_rsp.tid_flow_qp =
3966 cpu_to_be32(qpriv->tid_rdma.local.qp |
3967 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
3968 TID_RDMA_DESTQP_FLOW_SHIFT) |
3970 ohdr->u.tid_rdma.w_rsp.verbs_qp = cpu_to_be32(qp->remote_qpn);
3973 hdwords = sizeof(ohdr->u.tid_rdma.w_rsp) / sizeof(u32);
3974 qpriv->pending_tid_w_segs++;
3979 static void hfi1_add_tid_reap_timer(struct rvt_qp *qp)
3981 struct hfi1_qp_priv *qpriv = qp->priv;
3983 lockdep_assert_held(&qp->s_lock);
3984 if (!(qpriv->s_flags & HFI1_R_TID_RSC_TIMER)) {
3985 qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
3986 qpriv->s_tid_timer.expires = jiffies +
3987 qpriv->tid_timer_timeout_jiffies;
3988 add_timer(&qpriv->s_tid_timer);
3992 static void hfi1_mod_tid_reap_timer(struct rvt_qp *qp)
3994 struct hfi1_qp_priv *qpriv = qp->priv;
3996 lockdep_assert_held(&qp->s_lock);
3997 qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
3998 mod_timer(&qpriv->s_tid_timer, jiffies +
3999 qpriv->tid_timer_timeout_jiffies);
4002 static int hfi1_stop_tid_reap_timer(struct rvt_qp *qp)
4004 struct hfi1_qp_priv *qpriv = qp->priv;
4007 lockdep_assert_held(&qp->s_lock);
4008 if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
4009 rval = del_timer(&qpriv->s_tid_timer);
4010 qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
4015 void hfi1_del_tid_reap_timer(struct rvt_qp *qp)
4017 struct hfi1_qp_priv *qpriv = qp->priv;
4019 del_timer_sync(&qpriv->s_tid_timer);
4020 qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
4023 static void hfi1_tid_timeout(struct timer_list *t)
4025 struct hfi1_qp_priv *qpriv = from_timer(qpriv, t, s_tid_timer);
4026 struct rvt_qp *qp = qpriv->owner;
4027 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
4028 unsigned long flags;
4031 spin_lock_irqsave(&qp->r_lock, flags);
4032 spin_lock(&qp->s_lock);
4033 if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
4034 dd_dev_warn(dd_from_ibdev(qp->ibqp.device), "[QP%u] %s %d\n",
4035 qp->ibqp.qp_num, __func__, __LINE__);
4036 trace_hfi1_msg_tid_timeout(/* msg */
4037 qp, "resource timeout = ",
4038 (u64)qpriv->tid_timer_timeout_jiffies);
4039 hfi1_stop_tid_reap_timer(qp);
4041 * Go through the entire ack queue and clear any outstanding
4042 * HW flow and RcvArray resources.
4044 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
4045 for (i = 0; i < rvt_max_atomic(rdi); i++) {
4046 struct tid_rdma_request *req =
4047 ack_to_tid_req(&qp->s_ack_queue[i]);
4049 hfi1_kern_exp_rcv_clear_all(req);
4051 spin_unlock(&qp->s_lock);
4052 if (qp->ibqp.event_handler) {
4055 ev.device = qp->ibqp.device;
4056 ev.element.qp = &qp->ibqp;
4057 ev.event = IB_EVENT_QP_FATAL;
4058 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
4060 rvt_rc_error(qp, IB_WC_RESP_TIMEOUT_ERR);
4063 spin_unlock(&qp->s_lock);
4065 spin_unlock_irqrestore(&qp->r_lock, flags);
4068 void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
4070 /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requester side) */
4073 * 1. Find matching SWQE
4074 * 2. Check that TIDENTRY array has enough space for a complete
4075 * segment. If not, put QP in error state.
4076 * 3. Save response data in struct tid_rdma_req and struct tid_rdma_flow
4077 * 4. Remove HFI1_S_WAIT_TID_RESP from s_flags.
4078 * 5. Set qp->s_state
4079 * 6. Kick the send engine (hfi1_schedule_send())
4081 struct ib_other_headers *ohdr = packet->ohdr;
4082 struct rvt_qp *qp = packet->qp;
4083 struct hfi1_qp_priv *qpriv = qp->priv;
4084 struct hfi1_ctxtdata *rcd = packet->rcd;
4085 struct rvt_swqe *wqe;
4086 struct tid_rdma_request *req;
4087 struct tid_rdma_flow *flow;
4088 enum ib_wc_status status;
4089 u32 opcode, aeth, psn, flow_psn, i, tidlen = 0, pktlen;
4091 unsigned long flags;
4093 fecn = process_ecn(qp, packet);
4094 psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4095 aeth = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.aeth);
4096 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
4098 spin_lock_irqsave(&qp->s_lock, flags);
4100 /* Ignore invalid responses */
4101 if (cmp_psn(psn, qp->s_next_psn) >= 0)
4104 /* Ignore duplicate responses. */
4105 if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0))
4108 if (unlikely(qp->s_acked == qp->s_tail))
4112 * If we are waiting for a particular packet sequence number
4113 * due to a request being resent, check for it. Otherwise,
4114 * ensure that we haven't missed anything.
4116 if (qp->r_flags & RVT_R_RDMAR_SEQ) {
4117 if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
4119 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
4122 wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
4123 if (unlikely(wqe->wr.opcode != IB_WR_TID_RDMA_WRITE))
4126 req = wqe_to_tid_req(wqe);
4128 * If we've lost ACKs and our acked_tail pointer is too far
4129 * behind, don't overwrite segments. Just drop the packet and
4130 * let the reliability protocol take care of it.
4132 if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS))
4136 * The call to do_rc_ack() should be last in the chain of
4137 * packet checks because it will end up updating the QP state.
4138 * Therefore, anything that would prevent the packet from
4139 * being accepted as a successful response should be prior
4142 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
4145 trace_hfi1_ack(qp, psn);
4147 flow = &req->flows[req->setup_head];
4150 flow->tid_offset = 0;
4152 flow->resync_npkts = 0;
4153 flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp);
4154 flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
4155 TID_RDMA_DESTQP_FLOW_MASK;
4156 flow_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_psn));
4157 flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
4158 flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
4159 flow->flow_state.resp_ib_psn = psn;
4160 flow->length = min_t(u32, req->seg_len,
4161 (wqe->length - (req->comp_seg * req->seg_len)));
4163 flow->npkts = rvt_div_round_up_mtu(qp, flow->length);
4164 flow->flow_state.lpsn = flow->flow_state.spsn +
4166 /* payload length = packet length - (header length + ICRC length) */
4167 pktlen = packet->tlen - (packet->hlen + 4);
4168 if (pktlen > sizeof(flow->tid_entry)) {
4169 status = IB_WC_LOC_LEN_ERR;
4172 memcpy(flow->tid_entry, packet->ebuf, pktlen);
4173 flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
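/*
 * Each 32-bit TID entry received in the payload encodes an RcvArray
 * index (IDX), control bits (CTRL) and a length in pages (LEN);
 * EXP_TID_GET() extracts the fields, and the loop below checks that
 * the entries cover the whole segment.
 */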
4174 trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);
4177 trace_hfi1_tid_write_sender_rcv_resp(qp, 0);
4179 * Walk the TID_ENTRY list to make sure we have enough space for a
4182 for (i = 0; i < flow->tidcnt; i++) {
4183 trace_hfi1_tid_entry_rcv_write_resp(/* entry */
4184 qp, i, flow->tid_entry[i]);
4185 if (!EXP_TID_GET(flow->tid_entry[i], LEN)) {
4186 status = IB_WC_LOC_LEN_ERR;
4189 tidlen += EXP_TID_GET(flow->tid_entry[i], LEN);
4191 if (tidlen * PAGE_SIZE < flow->length) {
4192 status = IB_WC_LOC_LEN_ERR;
4196 trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn,
4199 * If this is the first response for this request, set the initial
4200 * flow index to the current flow.
4202 if (!cmp_psn(psn, wqe->psn)) {
4203 req->r_last_acked = mask_psn(wqe->psn - 1);
4204 /* Set acked flow index to head index */
4205 req->acked_tail = req->setup_head;
4208 /* advance circular buffer head */
4209 req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS);
4210 req->state = TID_REQUEST_ACTIVE;
4213 * If all responses for this TID RDMA WRITE request have been received,
4214 * advance the pointer to the next one.
4215 * Since TID RDMA requests could be mixed in with regular IB requests,
4216 * they might not appear sequentially in the queue. Therefore, the
4217 * next request needs to be "found".
4219 if (qpriv->s_tid_cur != qpriv->s_tid_head &&
4220 req->comp_seg == req->total_segs) {
4221 for (i = qpriv->s_tid_cur + 1; ; i++) {
4222 if (i == qp->s_size)
4224 wqe = rvt_get_swqe_ptr(qp, i);
4225 if (i == qpriv->s_tid_head)
4227 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
4230 qpriv->s_tid_cur = i;
4232 qp->s_flags &= ~HFI1_S_WAIT_TID_RESP;
4233 hfi1_schedule_tid_send(qp);
4237 status = IB_WC_LOC_QP_OP_ERR;
4239 rvt_error_qp(qp, status);
4242 qp->s_flags |= RVT_S_ECN;
4243 spin_unlock_irqrestore(&qp->s_lock, flags);
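
/*
 * Illustrative sketch (not part of the driver): the WRITE RESP handler
 * above splits the 32-bit KDETH flow PSN into a generation and an
 * in-generation sequence number. Recombining them, assuming the same
 * HFI1_KDETH_BTH_SEQ_* definitions that MAX_TID_FLOW_PSN is built from:
 *
 *	static u32 example_full_flow_psn(u32 generation, u32 seq)
 *	{
 *		return mask_psn((generation << HFI1_KDETH_BTH_SEQ_SHIFT) |
 *				(seq & HFI1_KDETH_BTH_SEQ_MASK));
 *	}
 *
 * A flow that starts at spsn and carries npkts packets therefore ends at
 * lpsn = spsn + npkts - 1, which is exactly what the code above computes.
 */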

bool hfi1_build_tid_rdma_packet(struct rvt_swqe *wqe,
				struct ib_other_headers *ohdr,
				u32 *bth1, u32 *bth2, u32 *len)
{
	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
	struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
	struct tid_rdma_params *remote;
	struct rvt_qp *qp = req->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	u32 tidentry = flow->tid_entry[flow->tid_idx];
	u32 tidlen = EXP_TID_GET(tidentry, LEN) << PAGE_SHIFT;
	struct tid_rdma_write_data *wd = &ohdr->u.tid_rdma.w_data;
	u32 next_offset, om = KDETH_OM_LARGE;
	bool last_pkt;

	if (!tidlen) {
		hfi1_trdma_send_complete(qp, wqe, IB_WC_REM_INV_RD_REQ_ERR);
		rvt_error_qp(qp, IB_WC_REM_INV_RD_REQ_ERR);
	}

	*len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
	flow->sent += *len;
	next_offset = flow->tid_offset + *len;
	last_pkt = (flow->tid_idx == (flow->tidcnt - 1) &&
		    next_offset >= tidlen) || (flow->sent >= flow->length);
	trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry);
	trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);

	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);
	KDETH_RESET(wd->kdeth0, KVER, 0x1);
	KDETH_SET(wd->kdeth0, SH, !last_pkt);
	KDETH_SET(wd->kdeth0, INTR, !!(!last_pkt && remote->urg));
	KDETH_SET(wd->kdeth0, TIDCTRL, EXP_TID_GET(tidentry, CTRL));
	KDETH_SET(wd->kdeth0, TID, EXP_TID_GET(tidentry, IDX));
	KDETH_SET(wd->kdeth0, OM, om == KDETH_OM_LARGE);
	KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om);
	KDETH_RESET(wd->kdeth1, JKEY, remote->jkey);
	wd->verbs_qp = cpu_to_be32(qp->remote_qpn);
	rcu_read_unlock();

	*bth1 = flow->tid_qpn;
	*bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
			 HFI1_KDETH_BTH_SEQ_MASK) |
			 (flow->flow_state.generation <<
			  HFI1_KDETH_BTH_SEQ_SHIFT));
	if (last_pkt) {
		/* PSNs are zero-based, so +1 to count number of packets */
		if (flow->flow_state.lpsn + 1 +
		    rvt_div_round_up_mtu(qp, req->seg_len) >
		    MAX_TID_FLOW_PSN)
			req->state = TID_REQUEST_SYNC;
		*bth2 |= IB_BTH_REQ_ACK;
	}

	if (next_offset >= tidlen) {
		flow->tid_offset = 0;
		flow->tid_idx++;
	} else {
		flow->tid_offset = next_offset;
	}
	return last_pkt;
}
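
/*
 * Illustrative sketch (not part of the driver): the KDETH OFFSET field
 * written above is expressed in units of the offset mode in use, so a
 * byte offset is scaled down by the OM before being placed in the header:
 *
 *	static u32 example_kdeth_offset(u32 byte_offset, u32 om)
 *	{
 *		return byte_offset / om;	// om == KDETH_OM_LARGE here
 *	}
 *
 * The OM bit in kdeth0 tells the receiver which scale factor to apply
 * when reconstructing the byte offset.
 */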

void hfi1_rc_rcv_tid_rdma_write_data(struct hfi1_packet *packet)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ctxtdata *rcd = priv->rcd;
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_ack_entry *e;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	u32 psn, next;
	u8 opcode;
	bool fecn;

	fecn = process_ecn(qp, packet);
	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
	opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;

	/*
	 * All error handling should be done by now. If we are here, the packet
	 * is either good or has been accepted by the error handler.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	e = &qp->s_ack_queue[priv->r_tid_tail];
	req = ack_to_tid_req(e);
	flow = &req->flows[req->clear_tail];
	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
		update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);

		if (cmp_psn(psn, flow->flow_state.r_next_psn))
			goto send_nak;

		flow->flow_state.r_next_psn = mask_psn(psn + 1);
		/*
		 * Copy the payload to destination buffer if this packet is
		 * delivered as an eager packet due to RSM rule and FECN.
		 * The RSM rule selects FECN bit in BTH and SH bit in
		 * KDETH header and therefore will not match the last
		 * packet of each segment that has SH bit cleared.
		 */
		if (fecn && packet->etype == RHF_RCV_TYPE_EAGER) {
			struct rvt_sge_state ss;
			u32 len;
			u32 tlen = packet->tlen;
			u16 hdrsize = packet->hlen;
			u8 pad = packet->pad;
			u8 extra_bytes = pad + packet->extra_byte +
				(SIZE_OF_CRC << 2);
			u32 pmtu = qp->pmtu;

			if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
				goto send_nak;
			len = req->comp_seg * req->seg_len;
			len += delta_psn(psn,
				full_flow_psn(flow, flow->flow_state.spsn)) *
				pmtu;
			if (unlikely(req->total_len - len < pmtu))
				goto send_nak;

			/*
			 * The e->rdma_sge field is set when TID RDMA WRITE REQ
			 * is first received and is never modified thereafter.
			 */
			ss.sge = e->rdma_sge;
			ss.sg_list = NULL;
			ss.num_sge = 1;
			ss.total_len = req->total_len;
			rvt_skip_sge(&ss, len, false);
			rvt_copy_sge(qp, &ss, packet->payload, pmtu, false,
				     false);
			/* Raise the sw sequence check flag for next packet */
			priv->r_next_psn_kdeth = mask_psn(psn + 1);
			priv->s_flags |= HFI1_R_TID_SW_PSN;
		}
		goto exit;
	}
	flow->flow_state.r_next_psn = mask_psn(psn + 1);
	hfi1_kern_exp_rcv_clear(req);
	priv->alloc_w_segs--;
	rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK;
	req->comp_seg++;
	priv->s_nak_state = 0;

	/*
	 * Release the flow if one of the following conditions has been met:
	 *  - The request has reached a sync point AND all outstanding
	 *    segments have been completed, or
	 *  - The entire request is complete and there are no more requests
	 *    (of any kind) in the queue.
	 */
	trace_hfi1_rsp_rcv_tid_write_data(qp, psn);
	trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
					  req);
	trace_hfi1_tid_write_rsp_rcv_data(qp);
	if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
		priv->r_tid_ack = priv->r_tid_tail;

	if (opcode == TID_OP(WRITE_DATA_LAST)) {
		release_rdma_sge_mr(e);
		for (next = priv->r_tid_tail + 1; ; next++) {
			if (next > rvt_size_atomic(&dev->rdi))
				next = 0;
			if (next == priv->r_tid_head)
				break;
			e = &qp->s_ack_queue[next];
			if (e->opcode == TID_OP(WRITE_REQ))
				break;
		}
		priv->r_tid_tail = next;
		if (++qp->s_acked_ack_queue > rvt_size_atomic(&dev->rdi))
			qp->s_acked_ack_queue = 0;
	}

	hfi1_tid_write_alloc_resources(qp, true);

	/*
	 * If we need to generate more responses, schedule the
	 * send engine.
	 */
	if (req->cur_seg < req->total_segs ||
	    qp->s_tail_ack_queue != qp->r_head_ack_queue) {
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);
	}

	priv->pending_tid_w_segs--;
	if (priv->s_flags & HFI1_R_TID_RSC_TIMER) {
		if (priv->pending_tid_w_segs)
			hfi1_mod_tid_reap_timer(req->qp);
		else
			hfi1_stop_tid_reap_timer(req->qp);
	}

done:
	priv->s_flags |= RVT_S_ACK_PENDING;
	hfi1_schedule_tid_send(qp);
exit:
	priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
	if (fecn)
		qp->s_flags |= RVT_S_ECN;
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return;

send_nak:
	if (!priv->s_nak_state) {
		priv->s_nak_state = IB_NAK_PSN_ERROR;
		priv->s_nak_psn = flow->flow_state.r_next_psn;
		priv->s_flags |= RVT_S_ACK_PENDING;
		if (priv->r_tid_ack == HFI1_QP_WQE_INVALID)
			priv->r_tid_ack = priv->r_tid_tail;
		hfi1_schedule_tid_send(qp);
	}
	goto done;
}

static bool hfi1_tid_rdma_is_resync_psn(u32 psn)
{
	return (bool)((psn & HFI1_KDETH_BTH_SEQ_MASK) ==
		      HFI1_KDETH_BTH_SEQ_MASK);
}
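
/*
 * Example: a RESYNC PSN is any PSN whose in-generation sequence field is
 * all ones. Assuming the 11-bit sequence field implied by
 * MAX_TID_FLOW_PSN (mask 0x7ff):
 *
 *	hfi1_tid_rdma_is_resync_psn(0x37ff)	== true  (seq == 0x7ff)
 *	hfi1_tid_rdma_is_resync_psn(0x3700)	== false
 *
 * which is why hfi1_build_tid_rdma_resync() below places the RESYNC at
 * (generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1.
 */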

u32 hfi1_build_tid_rdma_write_ack(struct rvt_qp *qp, struct rvt_ack_entry *e,
				  struct ib_other_headers *ohdr, u16 iflow,
				  u32 *bth1, u32 *bth2)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_flow_state *fs = &qpriv->flow_state;
	struct tid_rdma_request *req = ack_to_tid_req(e);
	struct tid_rdma_flow *flow = &req->flows[iflow];
	struct tid_rdma_params *remote;

	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);
	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
	ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
	*bth1 = remote->qp;
	rcu_read_unlock();

	if (qpriv->resync) {
		*bth2 = mask_psn((fs->generation <<
				  HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
		ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
	} else if (qpriv->s_nak_state) {
		*bth2 = mask_psn(qpriv->s_nak_psn);
		ohdr->u.tid_rdma.ack.aeth =
			cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
				    (qpriv->s_nak_state <<
				     IB_AETH_CREDIT_SHIFT));
	} else {
		*bth2 = full_flow_psn(flow, flow->flow_state.lpsn);
		ohdr->u.tid_rdma.ack.aeth = rvt_compute_aeth(qp);
	}
	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);
	ohdr->u.tid_rdma.ack.tid_flow_qp =
		cpu_to_be32(qpriv->tid_rdma.local.qp |
			    ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
			     TID_RDMA_DESTQP_FLOW_SHIFT) |
			    qpriv->rcd->ctxt);

	ohdr->u.tid_rdma.ack.tid_flow_psn = 0;
	ohdr->u.tid_rdma.ack.verbs_psn =
		cpu_to_be32(flow->flow_state.resp_ib_psn);

	if (qpriv->resync) {
		/*
		 * If the PSN before the current expected KDETH PSN is the
		 * RESYNC PSN, then we never received a good TID RDMA WRITE
		 * DATA packet after a previous RESYNC.
		 * In this case, the next expected KDETH PSN stays the same.
		 */
		if (hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1)) {
			ohdr->u.tid_rdma.ack.tid_flow_psn =
				cpu_to_be32(qpriv->r_next_psn_kdeth_save);
		} else {
			/*
			 * Because the KDETH PSNs jump during a RESYNC, it's
			 * not possible to infer (or compute) the previous value
			 * of r_next_psn_kdeth in the case of back-to-back
			 * RESYNC packets. Therefore, we save it.
			 */
			qpriv->r_next_psn_kdeth_save =
				qpriv->r_next_psn_kdeth - 1;
			ohdr->u.tid_rdma.ack.tid_flow_psn =
				cpu_to_be32(qpriv->r_next_psn_kdeth_save);
			qpriv->r_next_psn_kdeth = mask_psn(*bth2 + 1);
		}
		qpriv->resync = false;
	}

	return sizeof(ohdr->u.tid_rdma.ack) / sizeof(u32);
}
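
/*
 * Illustrative sketch (not part of the driver): the NAK branch above
 * packs the NAK state into the AETH credit field. A minimal encoder,
 * mirroring what hfi1_rc_rcv_tid_rdma_ack() later unpacks with
 * (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK:
 *
 *	static __be32 example_nak_aeth(u32 msn, u8 nak_state)
 *	{
 *		return cpu_to_be32((msn & IB_MSN_MASK) |
 *				   ((u32)nak_state << IB_AETH_CREDIT_SHIFT));
 *	}
 */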

void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
{
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct rvt_swqe *wqe;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn;
	unsigned long flags;
	u16 fidx;

	trace_hfi1_tid_write_sender_rcv_tid_ack(qp, 0);
	process_ecn(qp, packet);
	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
	aeth = be32_to_cpu(ohdr->u.tid_rdma.ack.aeth);
	req_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.verbs_psn));
	resync_psn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.ack.tid_flow_psn));

	spin_lock_irqsave(&qp->s_lock, flags);
	trace_hfi1_rcv_tid_ack(qp, aeth, psn, req_psn, resync_psn);

	/* If we are waiting for an ACK to the RESYNC, drop any other packets */
	if ((qp->s_flags & HFI1_S_WAIT_HALT) &&
	    cmp_psn(psn, qpriv->s_resync_psn))
		goto ack_op_err;

	ack_psn = req_psn;
	if (hfi1_tid_rdma_is_resync_psn(psn))
		ack_kpsn = resync_psn;
	else
		ack_kpsn = psn;
	if (aeth >> 29) {
		ack_psn--;
		ack_kpsn--;
	}

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
		goto ack_op_err;

	req = wqe_to_tid_req(wqe);
	trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
				       wqe->lpsn, req);
	flow = &req->flows[req->acked_tail];
	trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);

	/* Drop stale ACK/NAK */
	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
		goto ack_op_err;

	while (cmp_psn(ack_kpsn,
		       full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 &&
	       req->ack_seg < req->cur_seg) {
		req->ack_seg++;
		/* advance acked segment pointer */
		req->acked_tail = CIRC_NEXT(req->acked_tail, MAX_FLOWS);
		req->r_last_acked = flow->flow_state.resp_ib_psn;
		trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
					       wqe->lpsn, req);
		if (req->ack_seg == req->total_segs) {
			req->state = TID_REQUEST_COMPLETE;
			wqe = do_rc_completion(qp, wqe,
					       to_iport(qp->ibqp.device,
							qp->port_num));
			trace_hfi1_sender_rcv_tid_ack(qp);
			atomic_dec(&qpriv->n_tid_requests);
			if (qp->s_acked == qp->s_tail)
				break;
			if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
				break;
			req = wqe_to_tid_req(wqe);
		}
		flow = &req->flows[req->acked_tail];
		trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
	}

	trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
				       wqe->lpsn, req);
	switch (aeth >> 29) {
	case 0:         /* ACK */
		if (qpriv->s_flags & RVT_S_WAIT_ACK)
			qpriv->s_flags &= ~RVT_S_WAIT_ACK;
		if (!hfi1_tid_rdma_is_resync_psn(psn)) {
			/* Check if there is any pending TID ACK */
			if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
			    req->ack_seg < req->cur_seg)
				hfi1_mod_tid_retry_timer(qp);
			else
				hfi1_stop_tid_retry_timer(qp);
			hfi1_schedule_send(qp);
		} else {
			u32 spsn, fpsn, last_acked, generation;
			struct tid_rdma_request *rptr;

			/* ACK(RESYNC) */
			hfi1_stop_tid_retry_timer(qp);
			/* Allow new requests (see hfi1_make_tid_rdma_pkt) */
			qp->s_flags &= ~HFI1_S_WAIT_HALT;
			/*
			 * Clear RVT_S_SEND_ONE flag in case that the TID RDMA
			 * ACK is received after the TID retry timer is fired
			 * again. In this case, do not send any more TID
			 * RESYNC request or wait for any more TID ACK packet.
			 */
			qpriv->s_flags &= ~RVT_S_SEND_ONE;
			hfi1_schedule_send(qp);

			if ((qp->s_acked == qpriv->s_tid_tail &&
			     req->ack_seg == req->total_segs) ||
			    qp->s_acked == qp->s_tail) {
				qpriv->s_state = TID_OP(WRITE_DATA_LAST);
				goto done;
			}

			if (req->ack_seg == req->comp_seg) {
				qpriv->s_state = TID_OP(WRITE_DATA);
				goto done;
			}

			/*
			 * The PSN to start with is the next PSN after the
			 * RESYNC PSN.
			 */
			psn = mask_psn(psn + 1);
			generation = psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
			spsn = 0;

			/*
			 * Update to the correct WQE when we get an ACK(RESYNC)
			 * in the middle of a request.
			 */
			if (delta_psn(ack_psn, wqe->lpsn))
				wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
			req = wqe_to_tid_req(wqe);
			flow = &req->flows[req->acked_tail];
			/*
			 * RESYNC re-numbers the PSN ranges of all remaining
			 * segments. Also, PSN's start from 0 in the middle of a
			 * segment and the first segment size is less than the
			 * default number of packets. flow->resync_npkts is used
			 * to track the number of packets from the start of the
			 * real segment to the point of 0 PSN after the RESYNC
			 * in order to later correctly rewind the SGE.
			 */
			fpsn = full_flow_psn(flow, flow->flow_state.spsn);
			req->r_ack_psn = psn;
			flow->resync_npkts +=
				delta_psn(mask_psn(resync_psn + 1), fpsn);
			/*
			 * Renumber all packet sequence number ranges
			 * based on the new generation.
			 */
			last_acked = qp->s_acked;
			rptr = req;
			while (1) {
				/* start from last acked segment */
				for (fidx = rptr->acked_tail;
				     CIRC_CNT(rptr->setup_head, fidx,
					      MAX_FLOWS);
				     fidx = CIRC_NEXT(fidx, MAX_FLOWS)) {
					u32 lpsn;
					u32 gen;

					flow = &rptr->flows[fidx];
					gen = flow->flow_state.generation;
					if (WARN_ON(gen == generation &&
						    flow->flow_state.spsn !=
						     spsn))
						continue;
					lpsn = flow->flow_state.lpsn;
					lpsn = full_flow_psn(flow, lpsn);
					flow->npkts =
						delta_psn(lpsn,
							  mask_psn(resync_psn)
							  );
					flow->flow_state.generation =
						generation;
					flow->flow_state.spsn = spsn;
					flow->flow_state.lpsn =
						flow->flow_state.spsn +
						flow->npkts - 1;
					flow->pkt = 0;
					spsn += flow->npkts;
					resync_psn += flow->npkts;
					trace_hfi1_tid_flow_rcv_tid_ack(qp,
									fidx,
									flow);
				}
				if (++last_acked == qpriv->s_tid_cur + 1)
					break;
				if (last_acked == qp->s_size)
					last_acked = 0;
				wqe = rvt_get_swqe_ptr(qp, last_acked);
				rptr = wqe_to_tid_req(wqe);
			}
			req->cur_seg = req->ack_seg;
			qpriv->s_tid_tail = qp->s_acked;
			qpriv->s_state = TID_OP(WRITE_REQ);
			hfi1_schedule_tid_send(qp);
		}
done:
		qpriv->s_retry = qp->s_retry_cnt;
		break;

	case 3:         /* NAK */
		hfi1_stop_tid_retry_timer(qp);
		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
			IB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			flow = &req->flows[req->acked_tail];
			trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
							flow);
			req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
			req->cur_seg = req->ack_seg;
			qpriv->s_tid_tail = qp->s_acked;
			qpriv->s_state = TID_OP(WRITE_REQ);
			qpriv->s_retry = qp->s_retry_cnt;
			hfi1_schedule_tid_send(qp);
			break;

		default:
			break;
		}
		break;

	default:
		break;
	}

ack_op_err:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
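
/*
 * Worked example of the ACK(RESYNC) renumbering loop above, using small
 * made-up numbers: suppose one unacked flow spans full PSNs 90..99
 * (ten packets) and resync_psn is 89 when the loop starts with spsn = 0
 * in the new generation. Then, for that flow:
 *
 *	flow->npkts = delta_psn(99, 89);	// 10
 *	flow->flow_state.spsn = 0;
 *	flow->flow_state.lpsn = 0 + 10 - 1;	// 9
 *	spsn += 10; resync_psn += 10;
 *
 * so the next remaining flow is renumbered to start at sequence 10 of
 * the new generation, and flow->resync_npkts records the distance
 * needed to rewind the SGE correctly later.
 */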

void hfi1_add_tid_retry_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	if (!(priv->s_flags & HFI1_S_TID_RETRY_TIMER)) {
		priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
		priv->s_tid_retry_timer.expires = jiffies +
			priv->tid_retry_timeout_jiffies + rdi->busy_jiffies;
		add_timer(&priv->s_tid_retry_timer);
	}
}

static void hfi1_mod_tid_retry_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	priv->s_flags |= HFI1_S_TID_RETRY_TIMER;
	mod_timer(&priv->s_tid_retry_timer, jiffies +
		  priv->tid_retry_timeout_jiffies + rdi->busy_jiffies);
}

static int hfi1_stop_tid_retry_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	int rval = 0;

	lockdep_assert_held(&qp->s_lock);
	if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
		rval = del_timer(&priv->s_tid_retry_timer);
		priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
	}
	return rval;
}

void hfi1_del_tid_retry_timer(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	del_timer_sync(&priv->s_tid_retry_timer);
	priv->s_flags &= ~HFI1_S_TID_RETRY_TIMER;
}

static void hfi1_tid_retry_timeout(struct timer_list *t)
{
	struct hfi1_qp_priv *priv = from_timer(priv, t, s_tid_retry_timer);
	struct rvt_qp *qp = priv->owner;
	struct rvt_swqe *wqe;
	unsigned long flags;
	struct tid_rdma_request *req;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	trace_hfi1_tid_write_sender_retry_timeout(qp, 0);
	if (priv->s_flags & HFI1_S_TID_RETRY_TIMER) {
		hfi1_stop_tid_retry_timer(qp);
		if (!priv->s_retry) {
			trace_hfi1_msg_tid_retry_timeout(/* msg */
				qp,
				"Exhausted retries. Tid retry timeout = ",
				(u64)priv->tid_retry_timeout_jiffies);

			wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
			hfi1_trdma_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		} else {
			wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
			req = wqe_to_tid_req(wqe);
			trace_hfi1_tid_req_tid_retry_timeout(/* req */
				qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);

			priv->s_flags &= ~RVT_S_WAIT_ACK;
			/* Only send one packet (the RESYNC) */
			priv->s_flags |= RVT_S_SEND_ONE;
			/*
			 * No additional request shall be made by this QP until
			 * the RESYNC has completed.
			 */
			qp->s_flags |= HFI1_S_WAIT_HALT;
			priv->s_state = TID_OP(RESYNC);
			priv->s_retry--;
			hfi1_schedule_tid_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
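
/*
 * Sketch of the retry-timer protocol implemented above (all entry points
 * assume the caller holds qp->s_lock, as the lockdep assertions state):
 *
 *	hfi1_add_tid_retry_timer(qp);	// arm once when a TID ACK is awaited
 *	hfi1_mod_tid_retry_timer(qp);	// push the deadline out on progress
 *	hfi1_stop_tid_retry_timer(qp);	// disarm when nothing is pending
 *
 * On expiry with retries remaining, the QP is steered into
 * TID_OP(RESYNC) and RVT_S_SEND_ONE restricts it to the single RESYNC
 * packet until the matching TID ACK arrives.
 */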

u32 hfi1_build_tid_rdma_resync(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr, u32 *bth1,
			       u32 *bth2, u16 fidx)
{
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct tid_rdma_params *remote;
	struct tid_rdma_request *req = wqe_to_tid_req(wqe);
	struct tid_rdma_flow *flow = &req->flows[fidx];
	u32 generation;

	rcu_read_lock();
	remote = rcu_dereference(qpriv->tid_rdma.remote);
	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth1, JKEY, remote->jkey);
	ohdr->u.tid_rdma.ack.verbs_qp = cpu_to_be32(qp->remote_qpn);
	*bth1 = remote->qp;
	rcu_read_unlock();

	generation = kern_flow_generation_next(flow->flow_state.generation);
	*bth2 = mask_psn((generation << HFI1_KDETH_BTH_SEQ_SHIFT) - 1);
	qpriv->s_resync_psn = *bth2;
	*bth2 |= IB_BTH_REQ_ACK;
	KDETH_RESET(ohdr->u.tid_rdma.ack.kdeth0, KVER, 0x1);

	return sizeof(ohdr->u.tid_rdma.resync) / sizeof(u32);
}
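
/*
 * Example: if the current flow generation is 5 and
 * kern_flow_generation_next() returns 6, the RESYNC PSN built above is
 * mask_psn((6 << HFI1_KDETH_BTH_SEQ_SHIFT) - 1), i.e. the last PSN of
 * generation 5 with an all-ones sequence field, which
 * hfi1_tid_rdma_is_resync_psn() recognizes on the responder side.
 */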

void hfi1_rc_rcv_tid_rdma_resync(struct hfi1_packet *packet)
{
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct hfi1_ctxtdata *rcd = qpriv->rcd;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct rvt_ack_entry *e;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	struct tid_flow_state *fs = &qpriv->flow_state;
	u32 psn, generation, idx, gen_next;
	bool fecn;
	unsigned long flags;

	fecn = process_ecn(qp, packet);
	psn = mask_psn(be32_to_cpu(ohdr->bth[2]));

	generation = mask_psn(psn + 1) >> HFI1_KDETH_BTH_SEQ_SHIFT;
	spin_lock_irqsave(&qp->s_lock, flags);

	gen_next = (fs->generation == KERN_GENERATION_RESERVED) ?
		generation : kern_flow_generation_next(fs->generation);
	/*
	 * RESYNC packet contains the "next" generation and can only be
	 * from the current or previous generations
	 */
	if (generation != mask_generation(gen_next - 1) &&
	    generation != gen_next)
		goto bail;
	/* Already processing a resync */
	if (qpriv->resync)
		goto bail;

	spin_lock(&rcd->exp_lock);
	if (fs->index >= RXE_NUM_TID_FLOWS) {
		/*
		 * If we don't have a flow, save the generation so it can be
		 * applied when a new flow is allocated
		 */
		fs->generation = generation;
	} else {
		/* Reprogram the QP flow with new generation */
		rcd->flows[fs->index].generation = generation;
		fs->generation = kern_setup_hw_flow(rcd, fs->index);
	}
	fs->psn = 0;
	/*
	 * Disable SW PSN checking since a RESYNC is equivalent to a
	 * sync point and the flow has/will be reprogrammed
	 */
	qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
	trace_hfi1_tid_write_rsp_rcv_resync(qp);

	/*
	 * Reset all TID flow information with the new generation.
	 * This is done for all requests and segments after the
	 * last received segment
	 */
	for (idx = qpriv->r_tid_tail; ; idx++) {
		u16 flow_idx;

		if (idx > rvt_size_atomic(&dev->rdi))
			idx = 0;
		e = &qp->s_ack_queue[idx];
		if (e->opcode == TID_OP(WRITE_REQ)) {
			req = ack_to_tid_req(e);
			trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn,
						      e->lpsn, req);

			/* start from last unacked segment */
			for (flow_idx = req->clear_tail;
			     CIRC_CNT(req->setup_head, flow_idx,
				      MAX_FLOWS);
			     flow_idx = CIRC_NEXT(flow_idx, MAX_FLOWS)) {
				u32 lpsn;
				u32 next;

				flow = &req->flows[flow_idx];
				lpsn = full_flow_psn(flow,
						     flow->flow_state.lpsn);
				next = flow->flow_state.r_next_psn;
				flow->npkts = delta_psn(lpsn, next - 1);
				flow->flow_state.generation = fs->generation;
				flow->flow_state.spsn = fs->psn;
				flow->flow_state.lpsn =
					flow->flow_state.spsn + flow->npkts - 1;
				flow->flow_state.r_next_psn =
					full_flow_psn(flow,
						      flow->flow_state.spsn);
				fs->psn += flow->npkts;
				trace_hfi1_tid_flow_rcv_resync(qp, flow_idx,
							       flow);
			}
		}
		if (idx == qp->s_tail_ack_queue)
			break;
	}

	spin_unlock(&rcd->exp_lock);
	qpriv->resync = true;
	/* RESYNC request always gets a TID RDMA ACK. */
	qpriv->s_nak_state = 0;
	qpriv->s_flags |= RVT_S_ACK_PENDING;
	hfi1_schedule_tid_send(qp);
bail:
	if (fecn)
		qp->s_flags |= RVT_S_ECN;
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
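
/*
 * Example of the generation check above: if fs->generation is 7 and
 * kern_flow_generation_next() yields, say, 8 (assuming no
 * reserved-generation skip), a RESYNC carrying generation 8 (the "next"
 * one) or mask_generation(8 - 1) == 7 (a retransmission within the
 * current generation) is accepted; any other generation is treated as
 * stale and dropped.
 */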

/*
 * Call this function when the last TID RDMA WRITE DATA packet for a request
 * is built.
 */
static void update_tid_tail(struct rvt_qp *qp)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;
	u32 i;
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Can't move beyond s_tid_cur */
	if (priv->s_tid_tail == priv->s_tid_cur)
		return;
	for (i = priv->s_tid_tail + 1; ; i++) {
		if (i == qp->s_size)
			i = 0;

		if (i == priv->s_tid_cur)
			break;
		wqe = rvt_get_swqe_ptr(qp, i);
		if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
			break;
	}
	priv->s_tid_tail = i;
	priv->s_state = TID_OP(WRITE_RESP);
}

int hfi1_make_tid_rdma_pkt(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
	__must_hold(&qp->s_lock)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rvt_swqe *wqe;
	u32 bth1 = 0, bth2 = 0, hwords = 5, len, middle = 0;
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss = &qp->s_sge;
	struct rvt_ack_entry *e = &qp->s_ack_queue[qp->s_tail_ack_queue];
	struct tid_rdma_request *req = ack_to_tid_req(e);
	bool last = false;
	u8 opcode = TID_OP(WRITE_DATA);

	lockdep_assert_held(&qp->s_lock);
	trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
	/*
	 * Prioritize the sending of the requests and responses over the
	 * sending of the TID RDMA data packets.
	 */
	if (((atomic_read(&priv->n_tid_requests) < HFI1_TID_RDMA_WRITE_CNT) &&
	     atomic_read(&priv->n_requests) &&
	     !(qp->s_flags & (RVT_S_BUSY | RVT_S_WAIT_ACK |
			     HFI1_S_ANY_WAIT_IO))) ||
	    (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg &&
	     !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)))) {
		struct iowait_work *iowork;

		iowork = iowait_get_ib_work(&priv->s_iowait);
		ps->s_txreq = get_waiting_verbs_txreq(iowork);
		if (ps->s_txreq || hfi1_make_rc_req(qp, ps)) {
			priv->s_flags |= HFI1_S_TID_BUSY_SET;
			return 1;
		}
	}

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (!ps->s_txreq)
		goto bail_no_tx;

	ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;

	if ((priv->s_flags & RVT_S_ACK_PENDING) &&
	    make_tid_rdma_ack(qp, ohdr, ps))
		return 1;

	/*
	 * Bail out if we can't send data.
	 * Be reminded that this check must be done after the call to
	 * make_tid_rdma_ack() because the responding QP could be in
	 * RTR state where it can send TID RDMA ACK, not TID RDMA WRITE DATA.
	 */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK))
		goto bail;

	if (priv->s_flags & RVT_S_WAIT_ACK)
		goto bail;

	/* Check whether there is anything to do. */
	if (priv->s_tid_tail == HFI1_QP_WQE_INVALID)
		goto bail;
	wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
	req = wqe_to_tid_req(wqe);
	trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn,
					wqe->lpsn, req);
	switch (priv->s_state) {
	case TID_OP(WRITE_REQ):
	case TID_OP(WRITE_RESP):
		priv->tid_ss.sge = wqe->sg_list[0];
		priv->tid_ss.sg_list = wqe->sg_list + 1;
		priv->tid_ss.num_sge = wqe->wr.num_sge;
		priv->tid_ss.total_len = wqe->length;

		if (priv->s_state == TID_OP(WRITE_REQ))
			hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
		priv->s_state = TID_OP(WRITE_DATA);
		/* fall through */

	case TID_OP(WRITE_DATA):
		/*
		 * 1. Check whether TID RDMA WRITE RESP available.
		 * 2. If no:
		 *    2.1 If have more segments and no TID RDMA WRITE RESP,
		 *        set HFI1_S_WAIT_TID_RESP
		 *    2.2 Return indicating no progress made.
		 * 3. If yes:
		 *    3.1 Build TID RDMA WRITE DATA packet.
		 *    3.2 If last packet in segment:
		 *        3.2.1 Change KDETH header bits
		 *        3.2.2 Advance RESP pointers.
		 *    3.3 Return indicating progress made.
		 */
		trace_hfi1_sender_make_tid_pkt(qp);
		trace_hfi1_tid_write_sender_make_tid_pkt(qp, 0);
		wqe = rvt_get_swqe_ptr(qp, priv->s_tid_tail);
		req = wqe_to_tid_req(wqe);
		len = wqe->length;

		if (!req->comp_seg || req->cur_seg == req->comp_seg)
			goto bail;

		trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode,
						wqe->psn, wqe->lpsn, req);
		last = hfi1_build_tid_rdma_packet(wqe, ohdr, &bth1, &bth2,
						  &len);

		if (last) {
			/* move pointer to next flow */
			req->clear_tail = CIRC_NEXT(req->clear_tail,
						    MAX_FLOWS);
			if (++req->cur_seg < req->total_segs) {
				if (!CIRC_CNT(req->setup_head, req->clear_tail,
					      MAX_FLOWS))
					qp->s_flags |= HFI1_S_WAIT_TID_RESP;
			} else {
				priv->s_state = TID_OP(WRITE_DATA_LAST);
				opcode = TID_OP(WRITE_DATA_LAST);

				/* Advance the s_tid_tail now */
				update_tid_tail(qp);
			}
		}
		hwords += sizeof(ohdr->u.tid_rdma.w_data) / sizeof(u32);
		ss = &priv->tid_ss;
		break;

	case TID_OP(RESYNC):
		trace_hfi1_sender_make_tid_pkt(qp);
		/* Use generation from the most recently received response */
		wqe = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
		req = wqe_to_tid_req(wqe);
		/* If no responses for this WQE look at the previous one */
		if (!req->comp_seg) {
			wqe = rvt_get_swqe_ptr(qp,
					       (!priv->s_tid_cur ? qp->s_size :
						priv->s_tid_cur) - 1);
			req = wqe_to_tid_req(wqe);
		}
		hwords += hfi1_build_tid_rdma_resync(qp, wqe, ohdr, &bth1,
						     &bth2,
						     CIRC_PREV(req->setup_head,
							       MAX_FLOWS));
		ss = NULL;
		len = 0;
		opcode = TID_OP(RESYNC);
		break;

	default:
		goto bail;
	}
	if (priv->s_flags & RVT_S_SEND_ONE) {
		priv->s_flags &= ~RVT_S_SEND_ONE;
		priv->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	ps->s_txreq->hdr_dwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = ss;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, (opcode << 24), bth1, bth2,
			     middle, ps);
	return 1;
bail:
	hfi1_put_txreq(ps->s_txreq);
bail_no_tx:
	ps->s_txreq = NULL;
	priv->s_flags &= ~RVT_S_BUSY;
	/*
	 * If we didn't get a txreq, the QP will be woken up later to try
	 * again. Set the iowait flag to indicate which work item to wake
	 * up.
	 * (A better algorithm should be found to do this and generalize the
	 * sleep/wakeup flags.)
	 */
	iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
	return 0;
}

static int make_tid_rdma_ack(struct rvt_qp *qp,
			     struct ib_other_headers *ohdr,
			     struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	struct hfi1_qp_priv *qpriv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	u32 hwords, next;
	u32 len = 0;
	u32 bth1 = 0, bth2 = 0;
	int middle = 0;
	u16 flow;
	struct tid_rdma_request *req, *nreq;

	trace_hfi1_tid_write_rsp_make_tid_ack(qp);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	e = &qp->s_ack_queue[qpriv->r_tid_ack];
	req = ack_to_tid_req(e);
	/*
	 * In the RESYNC case, we are exactly one segment past the
	 * previously sent ack or at the previously sent NAK. So to send
	 * the resync ack, we go back one segment (which might be part of
	 * the previous request) and let the do-while loop execute again.
	 * The advantage of executing the do-while loop is that any data
	 * received after the previous ack is automatically acked in the
	 * RESYNC ack. It turns out that for the do-while loop we only need
	 * to pull back qpriv->r_tid_ack, not the segment
	 * indices/counters. The scheme works even if the previous request
	 * was not a TID WRITE request.
	 */
	if (qpriv->resync) {
		if (!req->ack_seg || req->ack_seg == req->total_segs)
			qpriv->r_tid_ack = !qpriv->r_tid_ack ?
				rvt_size_atomic(&dev->rdi) :
				qpriv->r_tid_ack - 1;
		e = &qp->s_ack_queue[qpriv->r_tid_ack];
		req = ack_to_tid_req(e);
	}

	trace_hfi1_rsp_make_tid_ack(qp, e->psn);
	trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
					req);
	/*
	 * If we've sent all the ACKs that we can, we are done
	 * until we get more segments...
	 */
	if (!qpriv->s_nak_state && !qpriv->resync &&
	    req->ack_seg == req->comp_seg)
		goto bail;

	do {
		/*
		 * To deal with coalesced ACKs, the acked_tail pointer
		 * into the flow array is used. The distance between it
		 * and the clear_tail is the number of flows that are
		 * being ACK'ed.
		 */
		req->ack_seg +=
			/* Get up-to-date value */
			CIRC_CNT(req->clear_tail, req->acked_tail,
				 MAX_FLOWS);
		/* Advance acked index */
		req->acked_tail = req->clear_tail;

		/*
		 * req->clear_tail points to the segment currently being
		 * received. So, when sending an ACK, the previous
		 * segment is being ACK'ed.
		 */
		flow = CIRC_PREV(req->acked_tail, MAX_FLOWS);
		if (req->ack_seg != req->total_segs)
			break;
		req->state = TID_REQUEST_COMPLETE;

		next = qpriv->r_tid_ack + 1;
		if (next > rvt_size_atomic(&dev->rdi))
			next = 0;
		qpriv->r_tid_ack = next;
		if (qp->s_ack_queue[next].opcode != TID_OP(WRITE_REQ))
			break;
		nreq = ack_to_tid_req(&qp->s_ack_queue[next]);
		if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg)
			break;

		/* Move to the next ack entry now */
		e = &qp->s_ack_queue[qpriv->r_tid_ack];
		req = ack_to_tid_req(e);
	} while (1);

	/*
	 * At this point qpriv->r_tid_ack == qpriv->r_tid_tail but e and
	 * req could be pointing at the previous ack queue entry
	 */
	if (qpriv->s_nak_state ||
	    (qpriv->resync &&
	     !hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1) &&
	     (cmp_psn(qpriv->r_next_psn_kdeth - 1,
		      full_flow_psn(&req->flows[flow],
				    req->flows[flow].flow_state.lpsn)) > 0))) {
		/*
		 * A NAK will implicitly acknowledge all previous TID RDMA
		 * requests. Therefore, we NAK with the req->acked_tail
		 * segment for the request at qpriv->r_tid_ack (same at
		 * this point as the req->clear_tail segment for the
		 * qpriv->r_tid_tail request)
		 */
		e = &qp->s_ack_queue[qpriv->r_tid_ack];
		req = ack_to_tid_req(e);
		flow = req->acked_tail;
	} else if (req->ack_seg == req->total_segs &&
		   qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK)
		qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;

	trace_hfi1_tid_write_rsp_make_tid_ack(qp);
	trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
					req);
	hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1,
						&bth2);

	qpriv->s_flags &= ~RVT_S_ACK_PENDING;
	ps->s_txreq->hdr_dwords = hwords;
	ps->s_txreq->sde = qpriv->s_sde;
	ps->s_txreq->s_cur_size = len;
	ps->s_txreq->ss = NULL;
	hfi1_make_ruc_header(qp, ohdr, (TID_OP(ACK) << 24), bth1, bth2, middle,
			     ps);
	ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
	return 1;
bail:
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qpriv->s_flags &= ~RVT_S_ACK_PENDING;
	return 0;
}
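
/*
 * Worked example of the coalesced-ACK accounting above: with
 * MAX_FLOWS = 8 (from roundup_pow_of_two(MAX_REQ + 1)), acked_tail = 2
 * and clear_tail = 5,
 *
 *	CIRC_CNT(5, 2, 8) == 3
 *
 * so a single TID ACK acknowledges three completed segments and
 * acked_tail jumps straight to 5.
 */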

static int hfi1_send_tid_ok(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	return !(priv->s_flags & RVT_S_BUSY ||
		 qp->s_flags & HFI1_S_ANY_WAIT_IO) &&
		(verbs_txreq_queued(iowait_get_tid_work(&priv->s_iowait)) ||
		 (priv->s_flags & RVT_S_RESP_PENDING) ||
		 !(qp->s_flags & HFI1_S_ANY_TID_WAIT_SEND));
}

void _hfi1_do_tid_send(struct work_struct *work)
{
	struct iowait_work *w = container_of(work, struct iowait_work, iowork);
	struct rvt_qp *qp = iowait_to_qp(w->iow);

	hfi1_do_tid_send(qp);
}

static void hfi1_do_tid_send(struct rvt_qp *qp)
{
	struct hfi1_pkt_state ps;
	struct hfi1_qp_priv *priv = qp->priv;

	ps.dev = to_idev(qp->ibqp.device);
	ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
	ps.ppd = ppd_from_ibp(ps.ibp);
	ps.wait = iowait_get_tid_work(&priv->s_iowait);
	ps.in_thread = false;
	ps.timeout_int = qp->timeout_jiffies / 8;

	trace_hfi1_rc_do_tid_send(qp, false);
	spin_lock_irqsave(&qp->s_lock, ps.flags);

	/* Return if we are already busy processing a work request. */
	if (!hfi1_send_tid_ok(qp)) {
		if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
			iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
		spin_unlock_irqrestore(&qp->s_lock, ps.flags);
		return;
	}

	priv->s_flags |= RVT_S_BUSY;

	ps.timeout = jiffies + ps.timeout_int;
	ps.cpu = priv->s_sde ? priv->s_sde->cpu :
		cpumask_first(cpumask_of_node(ps.ppd->dd->node));
	ps.pkts_sent = false;

	/* ensure a pre-built packet is handled */
	ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
	do {
		/* Check for a constructed packet to be sent. */
		if (ps.s_txreq) {
			if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
				qp->s_flags |= RVT_S_BUSY;
				ps.wait = iowait_get_ib_work(&priv->s_iowait);
			}
			spin_unlock_irqrestore(&qp->s_lock, ps.flags);

			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (hfi1_verbs_send(qp, &ps))
				return;

			/* allow other tasks to run */
			if (hfi1_schedule_send_yield(qp, &ps, true))
				return;

			spin_lock_irqsave(&qp->s_lock, ps.flags);
			if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
				qp->s_flags &= ~RVT_S_BUSY;
				priv->s_flags &= ~HFI1_S_TID_BUSY_SET;
				ps.wait = iowait_get_tid_work(&priv->s_iowait);
				if (iowait_flag_set(&priv->s_iowait,
						    IOWAIT_PENDING_IB))
					hfi1_schedule_send(qp);
			}
		}
	} while (hfi1_make_tid_rdma_pkt(qp, &ps));
	iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
	spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}

static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
				   priv->s_sde ?
				   priv->s_sde->cpu :
				   cpumask_first(cpumask_of_node(dd->node)));
}

/**
 * hfi1_schedule_tid_send - schedule progress on TID RDMA state machine
 * @qp: the QP
 *
 * This schedules qp progress on the TID RDMA state machine. Caller
 * should hold the s_lock.
 * Unlike hfi1_schedule_send(), this cannot use hfi1_send_ok() because
 * the two state machines can step on each other with respect to the
 * RVT_S_BUSY flag.
 * Therefore, a modified test is used.
 *
 * Return: %true if the second leg is scheduled;
 * %false if the second leg is not scheduled.
 */
bool hfi1_schedule_tid_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_tid_ok(qp)) {
		/*
		 * The following call returns true if the qp is not on the
		 * queue and false if the qp is already on the queue before
		 * this call. Either way, the qp will be on the queue when the
		 * call returns.
		 */
		_hfi1_schedule_tid_send(qp);
		return true;
	}
	if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
		iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
				IOWAIT_PENDING_TID);
	return false;
}

bool hfi1_tid_rdma_ack_interlock(struct rvt_qp *qp, struct rvt_ack_entry *e)
{
	struct rvt_ack_entry *prev;
	struct tid_rdma_request *req;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;
	u32 s_prev;

	s_prev = qp->s_tail_ack_queue == 0 ? rvt_size_atomic(&dev->rdi) :
		(qp->s_tail_ack_queue - 1);
	prev = &qp->s_ack_queue[s_prev];

	if ((e->opcode == TID_OP(READ_REQ) ||
	     e->opcode == OP(RDMA_READ_REQUEST)) &&
	    prev->opcode == TID_OP(WRITE_REQ)) {
		req = ack_to_tid_req(prev);
		if (req->ack_seg != req->total_segs) {
			priv->s_flags |= HFI1_R_TID_WAIT_INTERLCK;
			return true;
		}
	}
	return false;
}

static u32 read_r_next_psn(struct hfi1_devdata *dd, u8 ctxt, u8 fidx)
{
	u64 reg;

	/*
	 * The only sane way to get the amount of
	 * progress is to read the HW flow state.
	 */
	reg = read_uctxt_csr(dd, ctxt, RCV_TID_FLOW_TABLE + (8 * fidx));
	return mask_psn(reg);
}
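
/*
 * Each entry in the hardware flow table is an 8-byte CSR, so flow fidx
 * lives at RCV_TID_FLOW_TABLE + 8 * fidx. For example, flow 3 is read at
 * offset RCV_TID_FLOW_TABLE + 24, and mask_psn() extracts the
 * generation/sequence PSN from the register value.
 */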

static void tid_rdma_rcv_err(struct hfi1_packet *packet,
			     struct ib_other_headers *ohdr,
			     struct rvt_qp *qp, u32 psn, int diff, bool fecn)
{
	unsigned long flags;

	tid_rdma_rcv_error(packet, ohdr, qp, psn, diff);
	if (fecn) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qp->s_flags |= RVT_S_ECN;
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
}

static void update_r_next_psn_fecn(struct hfi1_packet *packet,
				   struct hfi1_qp_priv *priv,
				   struct hfi1_ctxtdata *rcd,
				   struct tid_rdma_flow *flow,
				   bool fecn)
{
	/*
	 * If a start/middle packet is delivered here due to
	 * RSM rule and FECN, we need to update the r_next_psn.
	 */
	if (fecn && packet->etype == RHF_RCV_TYPE_EAGER &&
	    !(priv->s_flags & HFI1_R_TID_SW_PSN)) {
		struct hfi1_devdata *dd = rcd->dd;

		flow->flow_state.r_next_psn =
			read_r_next_psn(dd, rcd->ctxt, flow->idx);
	}
}