/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
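
/*
 * Usage sketch (illustrative, not part of this file): callers gate work
 * on this table rather than comparing states directly.  A hypothetical
 * receive path would bail out unless the current state allows receive
 * processing:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
 *		return;
 *
 * rvt_post_recv()/rvt_post_send() below test the RVT_POST_*_OK bits the
 * same way, and the RVT_FLUSH_* bits mark states whose queued work
 * should be completed in error rather than processed.
 */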

static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map,
			 gfp_t gfp)
{
	unsigned long page = get_zeroed_page(gfp);

	/*
	 * Free the page if someone raced with us installing it.
	 */
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt device info structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (rdi->dparms.qpn_res_end < rdi->dparms.qpn_res_start)
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table rather than keep a second one.  Go ahead and mark
	 * the bitmaps for those.  The reserved range must be *after* the
	 * range which verbs uses.
	 */
	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map, GFP_KERNEL);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}

int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER) {
		rvt_pr_info(rdi, "Driver is doing QP init.\n");
		return 0;
	}

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_node(rdi->qp_dev->qp_table_size *
			     sizeof(*rdi->qp_dev->qp_table),
			     GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}
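
/*
 * Usage sketch (illustrative, not part of rdmavt): a driver layering on
 * rdmavt is expected to fill in dparms and the driver_f callbacks before
 * calling in here.  The my_* names below are hypothetical placeholders.
 *
 *	rdi->dparms.qp_table_size = 256;
 *	rdi->dparms.qpn_res_start = 0x10000;	(reserved, e.g. for PSM)
 *	rdi->dparms.qpn_res_end = 0x1ffff;	(inclusive)
 *	rdi->driver_f.free_all_qps = my_free_all_qps;
 *	rdi->driver_f.qp_priv_alloc = my_qp_priv_alloc;
 *	rdi->driver_f.qp_priv_free = my_qp_priv_free;
 *	rdi->driver_f.notify_qp_reset = my_notify_qp_reset;
 *	err = rvt_driver_qp_init(rdi);
 *
 * All four callbacks checked above are mandatory unless the driver sets
 * RVT_FLAG_QP_INIT_DRIVER and does its own QP initialization.
 */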

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Return the number of QPs still in use.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}

void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	if (rdi->flags & RVT_FLAG_QP_INIT_DRIVER)
		return; /* driver did the qp init so nothing else to do */

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
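
/*
 * The QPN <-> bitmap mapping is positional arithmetic: QPN n lives in
 * map page n / RVT_BITS_PER_PAGE at bit n & RVT_BITS_PER_PAGE_MASK, and
 * mk_qpn() is the inverse of that decomposition (used by free_qpn()
 * below).  For example, assuming 4K pages (RVT_BITS_PER_PAGE == 32768):
 *
 *	qpn 70000 -> map index 2, bit offset 4464
 *	mk_qpn(qpt, &qpt->map[2], 4464) == 70000
 */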

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @gfp: memory allocation flags
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	int ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK.  It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(rdi->dparms.qos_shift > 1 &&
			offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt device info structure
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock and s_lock are required to be held by the caller.
 */
void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		  enum ib_qp_type type)
{
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		del_timer_sync(&qp->s_timer);

		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
	}

	/*
	 * Let the driver do any tear down it needs to for a qp
	 * that has been reset
	 */
	rdi->driver_f.notify_qp_reset(qp);

	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
EXPORT_SYMBOL(rvt_reset_qp);
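
/*
 * Callers of rvt_reset_qp() must follow the r_lock -> s_lock ordering
 * used throughout this file (see rvt_destroy_qp() for the pattern):
 *
 *	spin_lock_irq(&qp->r_lock);
 *	spin_lock(&qp->s_lock);
 *	rvt_reset_qp(rdi, qp, ibqp->qp_type);
 *	spin_unlock(&qp->s_lock);
 *	spin_unlock_irq(&qp->r_lock);
 *
 * Note that rvt_reset_qp() drops and re-acquires both locks while it
 * quiesces the QP, so state observed under the locks before the call
 * cannot be trusted afterwards.
 */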

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	gfp_t gfp;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QPs only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
						GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
		/* fall through */
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		if (gfp == GFP_NOIO)
			swq = __vmalloc(
				(init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		else
			swq = vmalloc_node(
				(init_attr->cap.max_send_wr + 1) * sz,
				rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
		if (!priv)
			goto bail_qp;
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else if (gfp == GFP_NOIO)
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);
			else
				qp->r_rq.wq = vmalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_reset_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good, now keep track of what types of
	 * opcodes can be processed on this QP. We do this by keeping track of
	 * what the 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}

void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct rvt_sge *sge = &wqe->sg_list[i];

				rvt_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
EXPORT_SYMBOL(rvt_clear_mr_refs);

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
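
/*
 * Lookup sketch (illustrative): the receive path finds a QP without
 * taking qpt_lock by walking the hash chain under RCU, which is why the
 * writers above publish with rcu_assign_pointer():
 *
 *	rcu_read_lock();
 *	for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
 *	     qp = rcu_dereference(qp->next))
 *		if (qp->ibqp.qp_num == qpn)
 *			break;
 *	rcu_read_unlock();
 *
 * A real lookup must take a reference (atomic_inc(&qp->refcount)) before
 * dropping the RCU read lock if the QP is used beyond it.
 */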

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				 rcu_dereference_protected(qp->next,
				  lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
EXPORT_SYMBOL(rvt_remove_qp);

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	enum rdma_link_layer link;

	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, link))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >=
		    be16_to_cpu(IB_MULTICAST_LID_BASE))
			goto inval;
		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu). We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;

			/*
			 * Ignored by drivers which do not support it. Not
			 * really worth creating a call back into the driver
			 * just to set a flag.
			 */
			qp->s_flags |= RVT_S_AHG_CLEAR;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}
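
/*
 * Usage sketch (illustrative): the normal verbs ramp for an RC QP drives
 * this function several times through ib_modify_qp():
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	ib_modify_qp(ibqp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				  IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *	attr.qp_state = IB_QPS_RTR;	(plus AV, path MTU, dest QPN,
 *					 RQ PSN, min RNR timer, ...)
 *	attr.qp_state = IB_QPS_RTS;	(plus SQ PSN, timeout, retry
 *					 counts, ...)
 *
 * Per the comment above, the port must be set in INIT and the MTU in
 * RTR; each transition is validated by ib_modify_qp_is_ok().
 */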

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp);
	return 0;
}

int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		  struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	return 0;
}
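
/*
 * The receive queue is a ring with a single producer (this function, or
 * rvt_post_srq_recv() for SRQs) advancing wq->head under r_rq.lock and a
 * consumer advancing wq->tail.  The smp_wmb() above guarantees that a
 * consumer which observes the new head also sees the fully written WQE;
 * the consumer side pairs it with a read barrier, roughly (illustrative):
 *
 *	tail = wq->tail;
 *	if (tail != wq->head) {
 *		smp_rmb();	(entry read after head index is read)
 *		wqe = rvt_get_rwqe_ptr(rq, tail);
 *		...
 *	}
 */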

/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp, struct ib_send_wr *wr)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
			return -EINVAL;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			return -EINVAL;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		return -EINVAL;
	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
		return -EINVAL;
	}

	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last)
		return -ENOMEM;

	if (rdi->driver_f.check_send_wr &&
	    rdi->driver_f.check_send_wr(qp, wr))
		return -EINVAL;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC)
		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
		 wr->opcode == IB_WR_RDMA_WRITE ||
		 wr->opcode == IB_WR_RDMA_READ)
		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
	else
		memcpy(&wqe->wr, wr, sizeof(wqe->wr));

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
	} else {
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return -EINVAL;
}

/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		  struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	int call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

	/*
	 * Ensure QP state is such that we can send. If not bail out early,
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty and there is only a single WR, just go
	 * ahead and kick the send engine into gear. Otherwise we will always
	 * just schedule the send to happen later.
	 */
	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	if (nreq && !call_send)
		rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	if (nreq && call_send)
		rdi->driver_f.do_send(qp);
	return err;
}
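
/*
 * Usage sketch (illustrative): a single WR posted to an idle send queue
 * takes the call_send fast path above and is handed directly to the
 * driver's do_send(); chained WRs, or a queue with work outstanding, are
 * left to schedule_send() instead.
 *
 *	struct ib_send_wr *bad_wr;
 *	struct ib_send_wr wr = {
 *		.opcode = IB_WR_SEND,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	err = ib_post_send(ibqp, &wr, &bad_wr);
 */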

/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}

void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
EXPORT_SYMBOL(rvt_free_qpn);

void rvt_dec_qp_cnt(struct rvt_dev_info *rdi)
{
	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	spin_unlock(&rdi->n_qps_lock);
}
EXPORT_SYMBOL(rvt_dec_qp_cnt);