/*
 * Copyright(c) 2016, 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"
static void rvt_rc_timeout(struct timer_list *t);
/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
	655360, /* 00: 655.36 */
	10,     /* 01:    .01 */
	20,     /* 02:    .02 */
	30,     /* 03:    .03 */
	40,     /* 04:    .04 */
	60,     /* 05:    .06 */
	80,     /* 06:    .08 */
	120,    /* 07:    .12 */
	160,    /* 08:    .16 */
	240,    /* 09:    .24 */
	320,    /* 0A:    .32 */
	480,    /* 0B:    .48 */
	640,    /* 0C:    .64 */
	960,    /* 0D:    .96 */
	1280,   /* 0E:   1.28 */
	1920,   /* 0F:   1.92 */
	2560,   /* 10:   2.56 */
	3840,   /* 11:   3.84 */
	5120,   /* 12:   5.12 */
	7680,   /* 13:   7.68 */
	10240,  /* 14:  10.24 */
	15360,  /* 15:  15.36 */
	20480,  /* 16:  20.48 */
	30720,  /* 17:  30.72 */
	40960,  /* 18:  40.96 */
	61440,  /* 19:  61.44 */
	81920,  /* 1A:  81.92 */
	122880, /* 1B: 122.88 */
	163840, /* 1C: 163.84 */
	245760, /* 1D: 245.76 */
	327680, /* 1E: 327.68 */
	491520  /* 1F: 491.52 */
};
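
/*
 * Illustrative note (not part of the original source): the 5-bit RNR NAK
 * timer code carried in an AETH indexes this table directly.  For example,
 * code 0x16 maps to ib_rvt_rnr_table[0x16] == 20480, i.e. a 20.48 msec RNR
 * timeout.  rvt_aeth_to_usec() later in this file performs exactly this
 * lookup:
 *
 *	usec = ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
 *				IB_AETH_CREDIT_MASK];
 */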
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
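
/*
 * Illustrative sketch (mirrors the checks made later in this file, e.g. in
 * rvt_post_send() and rvt_post_recv()): a post routine gates on the state
 * table above rather than open-coding per-state logic:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */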
/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
	/* assume that the boot CPU value is universal for all CPUs */
	return boot_cpu_data.x86_cache_size;
}
/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
	/*
	 * Use the only available X64 cacheless copy.  Add a __user cast
	 * to quiet sparse.  The src argument is already in the kernel so
	 * there are no security issues.  The extra fault recovery machinery
	 * is not invoked.
	 */
	__copy_user_nocache(dst, (void __user *)src, n, 0);
}
void rvt_wss_exit(struct rvt_dev_info *rdi)
{
	struct rvt_wss *wss = rdi->wss;

	if (!wss)
		return;

	/* coded to handle partially initialized and repeat callers */
	kfree(wss->entries);
	wss->entries = NULL;
	kfree(rdi->wss);
	rdi->wss = NULL;
}
/**
 * rvt_wss_init - Init wss data structures
 * @rdi: rvt dev struct
 *
 * Return: 0 on success
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
	unsigned int wss_threshold = rdi->dparms.wss_threshold;
	unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
	long llc_size;
	long llc_bits;
	long table_size;
	long table_bits;
	struct rvt_wss *wss;
	int node = rdi->dparms.node;

	if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
		rdi->wss = NULL;
		return 0;
	}

	rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
	if (!rdi->wss)
		return -ENOMEM;
	wss = rdi->wss;

	/* check for a valid percent range - default to 80 if none or invalid */
	if (wss_threshold < 1 || wss_threshold > 100)
		wss_threshold = 80;

	/* reject a wildly large period */
	if (wss_clean_period > 1000000)
		wss_clean_period = 256;

	/* reject a zero period */
	if (wss_clean_period == 0)
		wss_clean_period = 1;

	/*
	 * Calculate the table size - the next power of 2 larger than the
	 * LLC size.  LLC size is in KiB.
	 */
	llc_size = rvt_wss_llc_size() * 1024;
	table_size = roundup_pow_of_two(llc_size);

	/* one bit per page in rounded up table */
	llc_bits = llc_size / PAGE_SIZE;
	table_bits = table_size / PAGE_SIZE;
	wss->pages_mask = table_bits - 1;
	wss->num_entries = table_bits / BITS_PER_LONG;

	wss->threshold = (llc_bits * wss_threshold) / 100;
	if (wss->threshold == 0)
		wss->threshold = 1;

	wss->clean_period = wss_clean_period;
	atomic_set(&wss->clean_counter, wss_clean_period);

	wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
				    GFP_KERNEL, node);
	if (!wss->entries) {
		rvt_wss_exit(rdi);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
{
	int entry;
	int weight;
	unsigned long bits;

	/* become the cleaner if we decrement the counter to zero */
	if (atomic_dec_and_test(&wss->clean_counter)) {
		/*
		 * Set, not add, the clean period.  This avoids an issue
		 * where the counter could decrement below the clean period.
		 * Doing a set can result in lost decrements, slowing the
		 * clean advance.  Since this is a heuristic, this possible
		 * slowdown is OK.
		 *
		 * An alternative is to loop, advancing the counter by a
		 * clean period until the result is > 0. However, this could
		 * lead to several threads keeping another in the clean loop.
		 * This could be mitigated by limiting the number of times
		 * we stay in the loop.
		 */
		atomic_set(&wss->clean_counter, wss->clean_period);

		/*
		 * Uniquely grab the entry to clean and move to next.
		 * The current entry is always the lower bits of
		 * wss.clean_entry.  The table size, wss.num_entries,
		 * is always a power-of-2.
		 */
		entry = (atomic_inc_return(&wss->clean_entry) - 1)
			& (wss->num_entries - 1);

		/* clear the entry and count the bits */
		bits = xchg(&wss->entries[entry], 0);
		weight = hweight64((u64)bits);
		/* only adjust the contended total count if needed */
		if (weight)
			atomic_sub(weight, &wss->total_count);
	}
}
/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
	u32 nr = page & (BITS_PER_LONG - 1);

	if (!test_and_set_bit(nr, &wss->entries[entry]))
		atomic_inc(&wss->total_count);

	wss_advance_clean_counter(wss);
}
/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
	return atomic_read(&wss->total_count) >= wss->threshold;
}
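
/*
 * Illustrative sketch (an assumption about the caller, not code from this
 * file): an adaptive SGE copy routine would feed destination addresses into
 * the working set estimator and switch to the cacheless copy once the
 * estimated working set no longer fits in the LLC, roughly:
 *
 *	wss_insert(wss, sge->vaddr);
 *	if (wss_exceeds_threshold(wss))
 *		cacheless_memcpy(sge->vaddr, data, len);
 *	else
 *		memcpy(sge->vaddr, data, len);
 */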
static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our qpn table. No need for two. Let's go ahead and mark the
	 * bitmaps for those. The reserved range must be *after* the range
	 * which verbs uses.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}
/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}
/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset ||
	    !rdi->driver_f.notify_restart_rc)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_array_node(rdi->qp_dev->qp_table_size,
				   sizeof(*rdi->qp_dev->qp_table),
				   GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}
/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}
/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
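
/*
 * Illustrative worked example (not part of the original source): with
 * 4 KiB pages, RVT_BITS_PER_PAGE is 32768, so QPN 32773 lives in
 * qpt->map[1] at bit offset 5, and mk_qpn(qpt, &qpt->map[1], 5)
 * reconstructs 1 * 32768 + 5 == 32773.
 */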
/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK.  It gets re-assigned below
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: If the send side should be cleared or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

			rvt_put_swqe(wqe);

			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe - the send wqe
 * @lkey - the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		if (rvt_mr_has_lkey(sge->mr, lkey))
			return true;
	}
	return false;
}
/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp - the qp
 * @lkey - the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	u32 s_last = qp->s_last;

	while (s_last != qp->s_head) {
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

		if (rvt_swqe_has_lkey(wqe, lkey))
			return true;

		if (++s_last >= qp->s_size)
			s_last = 0;
	}
	if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
		return true;
	return false;
}
/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp - the qp
 * @lkey - the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	int i;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[i];

		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
			return true;
	}
	return false;
}
/**
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp - the qp
 * @lkey - the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
	bool lastwqe = false;

	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		/* avoid special QPs */
		return;
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto check_lwqe;

	if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
	    rvt_qp_sends_has_lkey(qp, lkey) ||
	    rvt_qp_acks_has_lkey(qp, lkey))
		lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
					rcu_dereference_protected(qp->next,
					lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		rvt_put_qp(qp);
	}
}
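
/*
 * Illustrative sketch (an assumption about the reader side, not code from
 * this file): the lookup this RCU choreography protects walks the same
 * hash bucket under rcu_read_lock(), e.g.:
 *
 *	rcu_read_lock();
 *	for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
 *	     qp = rcu_dereference(qp->next))
 *		if (qp->ibqp.qp_num == qpn)
 *			break;
 *	rcu_read_unlock();
 */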
/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp().  The difference is that the reset
 * path holds the necessary locks to protect against concurrent
 * access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
	atomic_set(&qp->s_reserved_used, 0);
}
/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
	__must_hold(&qp->s_lock)
	__must_hold(&qp->s_hlock)
	__must_hold(&qp->r_lock)
{
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		rvt_stop_rc_timers(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		rvt_del_timers_sync(qp);
		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
		/*
		 * Let the driver do any tear down or re-init it needs to for
		 * a qp that has been reset
		 */
		rdi->driver_f.notify_qp_reset(qp);
	}
	rvt_init_qp(rdi, qp, type);
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
}
/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: the qpn table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range of QPNs for non-verbs use.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	size_t sqsize;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge >
		    rdi->dparms.props.max_recv_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}
	sqsize =
		init_attr->cap.max_send_wr + 1 +
		rdi->dparms.reserved_operations;
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
		/* fall through */
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
				  rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kcalloc_node(rvt_max_atomic(rdi),
					     sizeof(*qp->s_ack_queue),
					     GFP_KERNEL,
					     rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}
		/* initialize timers needed for rc qp */
		timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
		hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		qp->s_rnr_timer.function = rvt_rc_rnr_retry;

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
		if (IS_ERR(priv)) {
			ret = priv;
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else
				qp->r_rq.wq = vzalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		atomic_set(&qp->local_ops_pending, 0);
		init_waitqueue_head(&qp->wait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = sqsize;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_init_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good, now keep track of what types of opcodes
	 * can be processed on this QP. We do this by keeping track of what the
	 * 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	if (!qp->ip)
		vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}
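
/*
 * Illustrative arithmetic (not part of the original source): with
 * init_attr->cap.max_send_wr == 100 and dparms.reserved_operations == 1,
 * sqsize computed in rvt_create_qp() above is 100 + 1 + 1 == 102: the
 * user's 100 slots, one slot kept empty so a full ring (head + 1 == tail)
 * can be distinguished from an empty one (head == tail), and one
 * driver-reserved operation slot.
 */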
/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (READ_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;
		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	rvt_get_qp(qp);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	int opa_ah;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
	opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
		qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->log_pmtu = ilog2(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}
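
/*
 * Illustrative sketch (standard verbs usage, not code from this file): a
 * consumer reaches rvt_modify_qp() through ib_modify_qp(), e.g. moving a
 * freshly created QP to INIT:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(ibqp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */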
/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	rdma_destroy_ah_attr(&qp->remote_ah_attr);
	rdma_destroy_ah_attr(&qp->alt_ah_attr);
	kfree(qp);
	return 0;
}
/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1 -
		rdi->dparms.reserved_operations;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num =
		rdma_ah_get_port_num(&qp->alt_ah_attr);
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}
/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		  const struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
				!qp->ibqp.srq;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}
		if (unlikely(qp_err_flush)) {
			struct ib_wc wc;

			memset(&wc, 0, sizeof(wc));
			wc.qp = &qp->ibqp;
			wc.opcode = IB_WC_RECV;
			wc.wr_id = wr->wr_id;
			wc.status = IB_WC_WR_FLUSH_ERR;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		} else {
			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
			wqe->wr_id = wr->wr_id;
			wqe->num_sge = wr->num_sge;
			for (i = 0; i < wr->num_sge; i++)
				wqe->sg_list[i] = wr->sg_list[i];
			/*
			 * Make sure queue entry is written
			 * before the head index.
			 */
			smp_wmb();
			wq->head = next;
		}
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	return 0;
}
/**
 * rvt_qp_valid_operation - validate post send wr request
 * @qp - the qp
 * @post_parms - the post send table for the driver
 * @wr - the work request
 *
 * The routine validates the operation based on the
 * validation table and returns the length of the operation
 * which can extend beyond the ib_send_wr.  Operation
 * dependent flags key atomic operation validation.
 *
 * There is an exception for UD qps that validates the pd and
 * overrides the length to include the additional UD specific
 * length.
 *
 * Returns a negative error or the length of the work request
 * for building the swqe.
 */
static inline int rvt_qp_valid_operation(
	struct rvt_qp *qp,
	const struct rvt_operation_params *post_parms,
	const struct ib_send_wr *wr)
{
	int len;

	if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
		return -EINVAL;
	if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
		return -EINVAL;
	if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
	    ibpd_to_rvtpd(qp->ibqp.pd)->user)
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
	    (wr->num_sge == 0 ||
	     wr->sg_list[0].length < sizeof(u64) ||
	     wr->sg_list[0].addr & (sizeof(u64) - 1)))
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
	    !qp->s_max_rd_atomic)
		return -EINVAL;
	len = post_parms[wr->opcode].length;
	/* UD specific */
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
		len = sizeof(struct ib_ud_wr);
	}
	return len;
}
/**
 * rvt_qp_is_avail - determine queue capacity
 * @qp: the qp
 * @rdi: the rdmavt device
 * @reserved_op: is reserved operation
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 *
 * For non reserved operations, the qp->s_avail
 * may be changed.
 *
 * The return value is zero or a -ENOMEM.
 */
static inline int rvt_qp_is_avail(
	struct rvt_qp *qp,
	struct rvt_dev_info *rdi,
	bool reserved_op)
{
	u32 slast;
	u32 avail;
	u32 reserved_used;

	/* see rvt_qp_wqe_unreserve() */
	smp_mb__before_atomic();
	reserved_used = atomic_read(&qp->s_reserved_used);
	if (unlikely(reserved_op)) {
		/* see rvt_qp_wqe_unreserve() */
		smp_mb__before_atomic();
		if (reserved_used >= rdi->dparms.reserved_operations)
			return -ENOMEM;
		return 0;
	}
	/* non-reserved operations */
	if (likely(qp->s_avail))
		return 0;
	slast = READ_ONCE(qp->s_last);
	if (qp->s_head >= slast)
		avail = qp->s_size - (qp->s_head - slast);
	else
		avail = slast - qp->s_head;

	/* see rvt_qp_wqe_unreserve() */
	smp_mb__before_atomic();
	reserved_used = atomic_read(&qp->s_reserved_used);
	avail = avail - 1 -
		(rdi->dparms.reserved_operations - reserved_used);
	/* ensure we don't assign a negative s_avail */
	if ((s32)avail <= 0)
		return -ENOMEM;
	qp->s_avail = avail;
	if (WARN_ON(qp->s_avail >
		    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
		rvt_pr_err(rdi,
			   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
			   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
			   qp->s_head, qp->s_tail, qp->s_cur,
			   qp->s_acked, qp->s_last);
	return 0;
}
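
/*
 * Illustrative worked example (not part of the original source): with
 * s_size == 16, s_head == 14, s_last == 3, one reserved operation and
 * none in use, head >= last so avail = 16 - (14 - 3) = 5, and s_avail
 * becomes 5 - 1 - (1 - 0) = 3 postable work requests.
 */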
/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
			   const struct ib_send_wr *wr,
			   bool *call_send)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u8 log_pmtu;
	int ret;
	size_t cplen;
	bool reserved_op;
	int local_ops_delayed = 0;

	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
	if (ret < 0)
		return ret;
	cplen = ret;

	/*
	 * Local operations include fast register and local invalidate.
	 * Fast register needs to be processed immediately because the
	 * registered lkey may be used by following work requests and the
	 * lkey needs to be valid at the time those requests are posted.
	 * Local invalidate can be processed immediately if fencing is
	 * not required and no previous local invalidate ops are pending.
	 * Signaled local operations that have been processed immediately
	 * need to have requests with "completion only" flags set posted
	 * to the send queue in order to generate completions.
	 */
	if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
		switch (wr->opcode) {
		case IB_WR_REG_MR:
			ret = rvt_fast_reg_mr(qp,
					      reg_wr(wr)->mr,
					      reg_wr(wr)->key,
					      reg_wr(wr)->access);
			if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
				return ret;
			break;
		case IB_WR_LOCAL_INV:
			if ((wr->send_flags & IB_SEND_FENCE) ||
			    atomic_read(&qp->local_ops_pending)) {
				local_ops_delayed = 1;
			} else {
				ret = rvt_invalidate_rkey(
					qp, wr->ex.invalidate_rkey);
				if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
					return ret;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	reserved_op = rdi->post_parms[wr->opcode].flags &
			RVT_OPERATION_USE_RESERVE;
	/* check for avail */
	ret = rvt_qp_is_avail(qp, rdi, reserved_op);
	if (ret)
		return ret;
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	/* cplen has length from above */
	memcpy(&wqe->wr, wr, cplen);

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		struct rvt_sge *last_sge = NULL;

		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;

			if (length == 0)
				continue;
			ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
					  &wr->sg_list[i], acc);
			if (unlikely(ret < 0))
				goto bail_inval_free;
			wqe->length += length;
			if (ret)
				last_sge = &wqe->sg_list[j];
			j += ret;
		}
		wqe->wr.num_sge = j;
	}

	/*
	 * Calculate and set SWQE PSN values prior to handing it off
	 * to the driver's check routine. This gives the driver the
	 * opportunity to adjust PSN values based on internal checks.
	 */
	log_pmtu = qp->log_pmtu;
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);

		log_pmtu = ah->log_pmtu;
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}

	if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
		if (local_ops_delayed)
			atomic_inc(&qp->local_ops_pending);
		else
			wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
		wqe->ssn = 0;
		wqe->psn = 0;
		wqe->lpsn = 0;
	} else {
		wqe->ssn = qp->s_ssn++;
		wqe->psn = qp->s_next_psn;
		wqe->lpsn = wqe->psn +
				(wqe->length ?
					((wqe->length - 1) >> log_pmtu) :
					0);
	}

	/* general part of wqe valid - allow for driver checks */
	if (rdi->driver_f.setup_wqe) {
		ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
		if (ret < 0)
			goto bail_inval_free_ref;
	}

	if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
		qp->s_next_psn = wqe->lpsn + 1;

	if (unlikely(reserved_op)) {
		wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
		rvt_qp_wqe_reserve(qp, wqe);
	} else {
		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
		qp->s_avail--;
	}
	trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
	smp_wmb(); /* see request builders */
	qp->s_head = next;

	return 0;

bail_inval_free_ref:
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return ret;
}
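
/*
 * Illustrative worked example (not part of the original source): for an
 * 8000 byte RC send on a QP with pmtu == 4096 (log_pmtu == 12), the
 * request spans two packets, so rvt_post_one_wr() above sets
 * wqe->lpsn = wqe->psn + ((8000 - 1) >> 12) = wqe->psn + 1.
 */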
/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	bool call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_hlock, flags);

	/*
	 * Ensure QP state is such that we can send. If not bail out early,
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_hlock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty, and we only have a single WR then just go
	 * ahead and kick the send engine into gear. Otherwise we will always
	 * just schedule the send to happen later.
	 */
	call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr, &call_send);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		nreq++;
	}
	spin_unlock_irqrestore(&qp->s_hlock, flags);
	if (nreq) {
		/*
		 * Only call do_send if there is exactly one packet, and the
		 * driver said it was ok.
		 */
		if (nreq == 1 && call_send)
			rdi->driver_f.do_send(qp);
		else
			rdi->driver_f.schedule_send_no_lock(qp);
	}
	return err;
}
/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}
/*
 * Validate a RWQE and fill in the SGE state.
 * Return: 1 if OK, 0 on failure.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, &wqe->sg_list[i],
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	return 1;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	return 0;
}
/**
 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return: -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
EXPORT_SYMBOL(rvt_get_rwqe);
/**
 * rvt_comm_est - handle trap with QP established
 * @qp: the QP
 */
void rvt_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_comm_est);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_rc_error);
/*
 * rvt_rnr_tbl_to_usec - convert an index into ib_rvt_rnr_table to usec
 * @index - the index
 *
 * Return: the RNR timeout in microseconds for the given table index
 */
unsigned long rvt_rnr_tbl_to_usec(u32 index)
{
	return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
}
EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
static inline unsigned long rvt_aeth_to_usec(u32 aeth)
{
	return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
				IB_AETH_CREDIT_MASK];
}
/*
 * rvt_add_retry_timer - add/start a retry timer
 * @qp - the QP
 *
 * add a retry timer on the QP
 */
void rvt_add_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
			      rdi->busy_jiffies;
	add_timer(&qp->s_timer);
}
EXPORT_SYMBOL(rvt_add_retry_timer);
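
/*
 * Illustrative arithmetic (not part of the original source): per IBTA the
 * local ACK timeout is 4.096 usec * 2^timeout, so qp->timeout == 14 gives
 * roughly 4096 * (1 << 14) / 1000 ~= 67109 usec (~67 msec) before the
 * retry timer fires; rdi->busy_jiffies then stretches the timeout further
 * as the number of RC QPs on the device grows.
 */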
/**
 * rvt_add_rnr_timer - add/start an rnr timer
 * @qp - the QP
 * @aeth - aeth of RNR timeout, simulated aeth for loopback
 *
 * add an rnr timer on the QP
 */
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
	u32 to;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	to = rvt_aeth_to_usec(aeth);
	trace_rvt_rnrnak_add(qp, to);
	hrtimer_start(&qp->s_rnr_timer,
		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);
/**
 * rvt_stop_rc_timers - stop all timers
 * @qp - the QP
 *
 * stop any pending timers
 */
void rvt_stop_rc_timers(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		hrtimer_try_to_cancel(&qp->s_rnr_timer);
	}
}
EXPORT_SYMBOL(rvt_stop_rc_timers);
/**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer if it is running
 */
static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		trace_rvt_rnrnak_stop(qp, 0);
	}
}
/**
 * rvt_del_timers_sync - wait for any timeout routines to exit
 * @qp - the QP
 */
void rvt_del_timers_sync(struct rvt_qp *qp)
{
	del_timer_sync(&qp->s_timer);
	hrtimer_cancel(&qp->s_rnr_timer);
}
EXPORT_SYMBOL(rvt_del_timers_sync);

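/*
 * Usage note (sketch, hypothetical driver teardown): rvt_stop_rc_timers()
 * needs s_lock and only prevents future firings, while
 * rvt_del_timers_sync() waits out in-flight handlers and therefore must
 * not be called with s_lock held (rvt_rc_timeout() takes that lock):
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	rvt_stop_rc_timers(qp);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 *	rvt_del_timers_sync(qp);
 */
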
/*
 * This is called from s_timer for missing responses.
 */
static void rvt_rc_timeout(struct timer_list *t)
{
	struct rvt_qp *qp = from_timer(qp, t, s_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];

		qp->s_flags &= ~RVT_S_TIMER;
		rvp->n_rc_timeouts++;
		del_timer(&qp->s_timer);
		trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
		if (rdi->driver_f.notify_restart_rc)
			rdi->driver_f.notify_restart_rc(qp,
							qp->s_last_psn + 1,
							1);
		rdi->driver_f.schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}

/*
 * This is called from s_rnr_timer for RNR timeouts.
 */
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
{
	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_stop_rnr_timer(qp);
	trace_rvt_rnrnak_timeout(qp, 0);
	rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(rvt_rc_rnr_retry);

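/*
 * Example (sketch): rvt_rc_rnr_retry() is the hrtimer callback armed by
 * rvt_add_rnr_timer() above; the wiring at QP initialization time looks
 * like (assuming CLOCK_MONOTONIC relative mode, as used elsewhere in
 * rdmavt):
 *
 *	hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	qp->s_rnr_timer.function = rvt_rc_rnr_retry;
 */
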
/**
 * rvt_qp_iter_init - initialize a QP iterator
 * @rdi: rvt devinfo
 * @v: u64 value
 * @cb: user-defined callback
 *
 * This returns an iterator suitable for iterating QPs
 * in the system.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * @cb.  An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * Use cases that require memory allocation to succeed
 * must preallocate appropriately.
 *
 * Return: a pointer to an rvt_qp_iter or NULL
 */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v))
{
	struct rvt_qp_iter *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->rdi = rdi;
	/* number of special QPs (SMI/GSI) for device */
	i->specials = rdi->ibdev.phys_port_cnt * 2;
	i->v = v;
	i->cb = cb;

	return i;
}
EXPORT_SYMBOL(rvt_qp_iter_init);

/**
 * rvt_qp_iter_next - return the next QP in iter
 * @iter - the iterator
 *
 * Fine grained QP iterator suitable for use
 * with debugfs seq_file mechanisms.
 *
 * Updates iter->qp with the current QP when the return
 * value is 0.
 *
 * Return: 0 - iter->qp is valid 1 - no more QPs
 */
int rvt_qp_iter_next(struct rvt_qp_iter *iter)
	__must_hold(RCU)
{
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;
	struct rvt_dev_info *rdi = iter->rdi;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 */
	for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct rvt_ibport *rvp;
				int pidx;

				pidx = n % rdi->ibdev.phys_port_cnt;
				rvp = rdi->ports[pidx];
				qp = rcu_dereference(rvp->qp[n & 1]);
			} else {
				qp = rcu_dereference(
					rdi->qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
EXPORT_SYMBOL(rvt_qp_iter_next);

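/*
 * Example (illustrative sketch, hypothetical driver code): using the fine
 * grained iterator from a debugfs seq_file ->next() hook; the seq_file
 * start/stop hooks are assumed to take and drop RCU around the walk, and
 * the _qp_seq_next name is made up for this sketch:
 *
 *	static void *_qp_seq_next(struct seq_file *s, void *v, loff_t *pos)
 *	{
 *		struct rvt_qp_iter *iter = v;
 *
 *		(*pos)++;
 *		if (rvt_qp_iter_next(iter))
 *			return NULL;	(no more QPs)
 *		return iter;		(iter->qp is valid)
 *	}
 */
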
/**
 * rvt_qp_iter - iterate all QPs
 * @rdi - rvt devinfo
 * @v - a 64 bit value
 * @cb - a callback
 *
 * This provides a way for iterating all QPs.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * cb.  An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * The code has an internal iterator to simplify
 * non seq_file use cases.
 */
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v))
{
	int ret;
	struct rvt_qp_iter i = {
		.rdi = rdi,
		.specials = rdi->ibdev.phys_port_cnt * 2,
		.v = v,
		.cb = cb
	};

	rcu_read_lock();
	do {
		ret = rvt_qp_iter_next(&i);
		if (!ret) {
			rvt_get_qp(i.qp);
			rcu_read_unlock();
			i.cb(i.qp, i.v);
			rcu_read_lock();
			rvt_put_qp(i.qp);
		}
	} while (!ret);
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);

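/*
 * Example (illustrative sketch, hypothetical callback): counting QPs in
 * the error state by smuggling a counter pointer through the u64 value,
 * one of the "criteria not part of the rvt_qp" uses described above:
 *
 *	static void count_error_qps(struct rvt_qp *qp, u64 v)
 *	{
 *		atomic_t *n_err = (atomic_t *)(uintptr_t)v;
 *
 *		if (qp->state == IB_QPS_ERR)
 *			atomic_inc(n_err);
 *	}
 *
 *	atomic_t n_err = ATOMIC_INIT(0);
 *
 *	rvt_qp_iter(rdi, (u64)(uintptr_t)&n_err, count_error_qps);
 */
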
/*
 * This should be called with s_lock held.
 */
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	trace_rvt_qp_send_completion(qp, wqe, last);
	if (++last >= qp->s_size)
		last = 0;
	trace_rvt_qp_send_completion(qp, wqe, last);
	qp->s_last = last;
	/* See post_send() */
	barrier();
	rvt_put_swqe(wqe);
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	rvt_qp_swqe_complete(qp,
			     wqe,
			     rdi->wc_opcode[wqe->wr.opcode],
			     status);

	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
EXPORT_SYMBOL(rvt_send_complete);

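/*
 * Example (sketch): a driver's ACK processing would complete the oldest
 * pending WQE under s_lock, e.g.:
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 *	rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */
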
/**
 * rvt_copy_sge - copy data to SGE memory
 * @qp: associated QP
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 * @copy_last: do a separate copy of the last 8 bytes
 */
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last)
{
	struct rvt_sge *sge = &ss->sge;
	int i;
	bool in_last = false;
	bool cacheless_copy = false;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	struct rvt_wss *wss = rdi->wss;
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;

	if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
		cacheless_copy = length >= PAGE_SIZE;
	} else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
		if (length >= PAGE_SIZE) {
			/*
			 * NOTE: this *assumes*:
			 * o The first vaddr is the dest.
			 * o If multiple pages, then vaddr is sequential.
			 */
			wss_insert(wss, sge->vaddr);
			if (length >= (2 * PAGE_SIZE))
				wss_insert(wss, (sge->vaddr + PAGE_SIZE));

			cacheless_copy = wss_exceeds_threshold(wss);
		} else {
			wss_advance_clean_counter(wss);
		}
	}

	if (copy_last) {
		if (length > 8) {
			length -= 8;
		} else {
			copy_last = false;
			in_last = true;
		}
	}

again:
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		if (unlikely(in_last)) {
			/* enforce byte transfer ordering */
			for (i = 0; i < len; i++)
				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
		} else if (cacheless_copy) {
			cacheless_memcpy(sge->vaddr, data, len);
		} else {
			memcpy(sge->vaddr, data, len);
		}
		rvt_update_sge(ss, len, release);
		data += len;
		length -= len;
	}

	if (copy_last) {
		copy_last = false;
		in_last = true;
		length = 8;
		goto again;
	}
}
EXPORT_SYMBOL(rvt_copy_sge);

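/*
 * Worked example for copy_last: with length == 24 and copy_last == true,
 * the while loop above copies bytes 0..15 first, then the "again" pass
 * copies the final 8 bytes one byte at a time so they land strictly
 * after the rest of the payload; with length <= 8 the whole transfer
 * takes the byte-ordered in_last path.
 */
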
/**
 * rvt_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
 * Note that although we are single threaded due to the send engine, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
void rvt_ruc_loopback(struct rvt_qp *sqp)
{
	struct rvt_ibport *rvp = NULL;
	struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();
	rvp = rdi->ports[sqp->port_num - 1];

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		rvp->n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
		if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
			wc.wc_flags = IB_WC_WITH_INVALIDATE;
			wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
		}
		goto send;

	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
send:
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
			     len, release, copy_last);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	rvt_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	rvp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
			       IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvt_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_ruc_loopback);