/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
                   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
                 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int hfi1_max_pds = 0xFFFF;
module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
                 "Maximum number of protection domains to support");

static unsigned int hfi1_max_ahs = 0xFFFF;
module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int hfi1_max_cqes = 0x2FFFF;
module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
                 "Maximum number of completion queue entries to support");

unsigned int hfi1_max_cqs = 0x1FFFF;
module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int hfi1_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int hfi1_max_qps = 16384;
module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int hfi1_max_sges = 0x60;
module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int hfi1_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
                 "Maximum number of multicast groups to support");

unsigned int hfi1_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
                   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
                 "Maximum number of attached QPs to support");

unsigned int hfi1_max_srqs = 1024;
module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int hfi1_max_srq_sges = 128;
module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int hfi1_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
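
/*
 * Usage note (illustrative): all of the limits above are read-only
 * (S_IRUGO) module parameters, so they are set at module load time,
 * e.g.:
 *
 *   modprobe hfi1 lkey_table_size=17 max_qps=32768
 *
 * and can be read back from /sys/module/hfi1/parameters/ afterwards.
 */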
static void verbs_sdma_complete(
        struct sdma_txreq *cookie,
        int status);

/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
        [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
        [IB_WR_SEND] = IB_WC_SEND,
        [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
        [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
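
/*
 * Illustrative sketch (an assumption, not part of this driver): the
 * table above is meant to be indexed directly by a send WQE's
 * ib_wr_opcode when the matching work completion is generated, e.g.:
 */
static inline enum ib_wc_opcode __example_wr_to_wc(enum ib_wr_opcode op)
{
        /* pick the completion opcode for a completed send WQE */
        return ib_hfi1_wc_opcode[op];
}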
/*
 * Length of header by opcode, 0 --> not supported
 */
const u8 hdr_len_by_opcode[256] = {
        /* RC */
        [IB_OPCODE_RC_SEND_FIRST] = 12 + 8,
        [IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8,
        [IB_OPCODE_RC_SEND_LAST] = 12 + 8,
        [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_RC_SEND_ONLY] = 12 + 8,
        [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
        [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8,
        [IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8,
        [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
        [IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
        [IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
        [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
        [IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
        [IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
        /* UC */
        [IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
        [IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
        [IB_OPCODE_UC_SEND_LAST] = 12 + 8,
        [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_UC_SEND_ONLY] = 12 + 8,
        [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
        [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8,
        [IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8,
        [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
        /* UD */
        [IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8,
        [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12
};
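
/*
 * Reading the table above: every supported opcode carries an 8-byte
 * LRH plus a 12-byte BTH, hence the common "12 + 8".  The remaining
 * term is the opcode's extension headers: RETH (16) for RDMA write
 * first/only and read requests, AETH (4) for read responses and ACKs,
 * AtomicETH (28) for compare-swap and fetch-add, DETH (8) for UD, and
 * 4 more bytes whenever immediate data is present.
 */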
static const opcode_handler opcode_handler_tbl[256] = {
        /* RC */
        [IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
        [IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
        /* UC */
        [IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv,
        [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
        /* UD */
        [IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv,
        [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv,
        /* CNP */
        [IB_OPCODE_CNP] = &hfi1_cnp_rcv
};

/*
 * System image GUID.
 */
__be64 ib_hfi1_sys_image_guid;
/**
 * hfi1_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @copy_last: do a separate copy of the last 8 bytes
 */
void hfi1_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
                   int release, bool copy_last)
{
        struct rvt_sge *sge = &ss->sge;
        int in_last = 0;
        int i;

        if (copy_last) {
                if (length > 8) {
                        length -= 8;
                } else {
                        copy_last = false;
                        in_last = 1;
                }
        }

again:
        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                WARN_ON_ONCE(len == 0);
                if (unlikely(in_last)) {
                        /* enforce byte transfer ordering */
                        for (i = 0; i < len; i++)
                                ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
                } else {
                        memcpy(sge->vaddr, data, len);
                }
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= RVT_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
                }
                data += len;
                length -= len;
        }

        if (copy_last) {
                copy_last = false;
                in_last = 1;
                length = 8;
                goto again;
        }
}
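
/*
 * Typical call site (illustrative; the exact arguments are an
 * assumption): the RC/UC receive paths copy an incoming payload into
 * the QP's receive SGE state one MTU at a time, without the separate
 * last-8-byte copy:
 *
 *         hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
 */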
/**
 * hfi1_skip_sge - skip over SGE memory
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
        struct rvt_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                WARN_ON_ONCE(len == 0);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (release)
                                rvt_put_mr(sge->mr);
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= RVT_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
}
/*
 * Make sure the QP is ready and able to accept the given opcode.
 */
static inline int qp_ok(int opcode, struct hfi1_packet *packet)
{
        struct hfi1_ibport *ibp;

        if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
                goto dropit;
        if (((opcode & OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
            (opcode == IB_OPCODE_CNP))
                return 1;
dropit:
        ibp = &packet->rcd->ppd->ibport_data;
        ibp->rvp.n_pkt_drops++;
        return 0;
}
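
/*
 * Note on the opcode check above: IB opcodes encode the transport in
 * their high-order bits (RC is 0x00-0x1f, UC 0x20-0x3f, UD 0x60-0x7f),
 * so masking with OPCODE_QP_MASK keeps just the transport class, which
 * must equal the allowed_ops value recorded for the QP's type.  CNP is
 * transport-independent and is always accepted.
 */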
/**
 * hfi1_ib_rcv - process an incoming packet
 * @packet: data packet information
 *
 * This is called to process an incoming packet at interrupt level.
 *
 * Tlen is the length of the header + data + CRC in bytes.
 */
void hfi1_ib_rcv(struct hfi1_packet *packet)
{
        struct hfi1_ctxtdata *rcd = packet->rcd;
        struct hfi1_ib_header *hdr = packet->hdr;
        u32 tlen = packet->tlen;
        struct hfi1_pportdata *ppd = rcd->ppd;
        struct hfi1_ibport *ibp = &ppd->ibport_data;
        struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
        unsigned long flags;
        u32 qp_num;
        u32 opcode;
        u16 lid;
        int lnh;

        /* Check for GRH */
        lnh = be16_to_cpu(hdr->lrh[0]) & 3;
        if (lnh == HFI1_LRH_BTH) {
                packet->ohdr = &hdr->u.oth;
        } else if (lnh == HFI1_LRH_GRH) {
                u32 vtf;

                packet->ohdr = &hdr->u.l.oth;
                if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
                        goto drop;
                vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
                if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
                        goto drop;
                packet->rcv_flags |= HFI1_HAS_GRH;
        } else {
                goto drop;
        }

        trace_input_ibhdr(rcd->dd, hdr);

        opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
        inc_opstats(tlen, &rcd->opstats->stats[opcode]);

        /* Get the destination QP number. */
        qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK;
        lid = be16_to_cpu(hdr->lrh[1]);
        if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
                     (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) {
                struct rvt_mcast *mcast;
                struct rvt_mcast_qp *p;

                if (lnh != HFI1_LRH_GRH)
                        goto drop;
                mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
                if (!mcast)
                        goto drop;
                list_for_each_entry_rcu(p, &mcast->qp_list, list) {
                        packet->qp = p->qp;
                        spin_lock_irqsave(&packet->qp->r_lock, flags);
                        if (likely((qp_ok(opcode, packet))))
                                opcode_handler_tbl[opcode](packet);
                        spin_unlock_irqrestore(&packet->qp->r_lock, flags);
                }
                /*
                 * Notify rvt_multicast_detach() if it is waiting for us
                 * to finish.
                 */
                if (atomic_dec_return(&mcast->refcount) <= 1)
                        wake_up(&mcast->wait);
        } else {
                rcu_read_lock();
                packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
                if (!packet->qp) {
                        rcu_read_unlock();
                        goto drop;
                }
                spin_lock_irqsave(&packet->qp->r_lock, flags);
                if (likely((qp_ok(opcode, packet))))
                        opcode_handler_tbl[opcode](packet);
                spin_unlock_irqrestore(&packet->qp->r_lock, flags);
                rcu_read_unlock();
        }
        return;

drop:
        ibp->rvp.n_pkt_drops++;
}
/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
        struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
        struct list_head *list = &dev->memwait;
        struct rvt_qp *qp = NULL;
        struct iowait *wait;
        unsigned long flags;
        struct hfi1_qp_priv *priv;

        write_seqlock_irqsave(&dev->iowait_lock, flags);
        if (!list_empty(list)) {
                wait = list_first_entry(list, struct iowait, list);
                qp = iowait_to_qp(wait);
                priv = qp->priv;
                list_del_init(&priv->s_iowait.list);
                /* refcount held until actual wake up */
                if (!list_empty(list))
                        mod_timer(&dev->mem_timer, jiffies + 1);
        }
        write_sequnlock_irqrestore(&dev->iowait_lock, flags);

        if (qp)
                hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
}
void update_sge(struct rvt_sge_state *ss, u32 length)
{
        struct rvt_sge *sge = &ss->sge;

        sge->vaddr += length;
        sge->length -= length;
        sge->sge_length -= length;
        if (sge->sge_length == 0) {
                if (--ss->num_sge)
                        *sge = *ss->sg_list++;
        } else if (sge->length == 0 && sge->mr->lkey) {
                if (++sge->n >= RVT_SEGSZ) {
                        if (++sge->m >= sge->mr->mapsz)
                                return;
                        sge->n = 0;
                }
                sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
        }
}
static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                                struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct verbs_txreq *tx;
        unsigned long flags;

        tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
        if (!tx) {
                spin_lock_irqsave(&qp->s_lock, flags);
                write_seqlock(&dev->iowait_lock);
                if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
                    list_empty(&priv->s_iowait.list)) {
                        qp->s_flags |= RVT_S_WAIT_TX;
                        list_add_tail(&priv->s_iowait.list, &dev->txwait);
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
                        atomic_inc(&qp->refcount);
                }
                qp->s_flags &= ~RVT_S_BUSY;
                write_sequnlock(&dev->iowait_lock);
                spin_unlock_irqrestore(&qp->s_lock, flags);
                tx = ERR_PTR(-EBUSY);
        }
        return tx;
}
static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
                                            struct rvt_qp *qp)
{
        struct verbs_txreq *tx;

        tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
        if (!tx)
                /* call slow path to get the lock */
                tx = __get_txreq(dev, qp);
        return tx;
}
void hfi1_put_txreq(struct verbs_txreq *tx)
{
        struct hfi1_ibdev *dev;
        struct rvt_qp *qp = tx->qp;
        struct hfi1_qp_priv *priv;
        unsigned long flags;
        unsigned int seq;

        dev = to_idev(qp->ibqp.device);

        sdma_txclean(dd_from_dev(dev), &tx->txreq);

        /* Free verbs_txreq and return to slab cache */
        kmem_cache_free(dev->verbs_txreq_cache, tx);

        do {
                seq = read_seqbegin(&dev->iowait_lock);
                if (!list_empty(&dev->txwait)) {
                        struct iowait *wait;

                        write_seqlock_irqsave(&dev->iowait_lock, flags);
                        /* Wake up first QP wanting a free struct */
                        wait = list_first_entry(&dev->txwait, struct iowait,
                                                list);
                        qp = iowait_to_qp(wait);
                        priv = qp->priv;
                        list_del_init(&priv->s_iowait.list);
                        /* refcount held until actual wake up */
                        write_sequnlock_irqrestore(&dev->iowait_lock, flags);
                        hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
                        break;
                }
        } while (read_seqretry(&dev->iowait_lock, seq));
}
/*
 * This is called with the progress side lock held.
 */
static void verbs_sdma_complete(
        struct sdma_txreq *cookie,
        int status)
{
        struct verbs_txreq *tx =
                container_of(cookie, struct verbs_txreq, txreq);
        struct rvt_qp *qp = tx->qp;

        spin_lock(&qp->s_lock);
        if (tx->wqe)
                hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
        else if (qp->ibqp.qp_type == IB_QPT_RC) {
                struct hfi1_ib_header *hdr;

                hdr = &tx->phdr.hdr;
                hfi1_rc_send_complete(qp, hdr);
        }
        /*
         * This happens when the send engine notes
         * a QP in the error state and cannot
         * do the flush work until that QP's
         * sdma work has finished.
         */
        if (qp->s_flags & RVT_S_WAIT_DMA) {
                qp->s_flags &= ~RVT_S_WAIT_DMA;
                hfi1_schedule_send(qp);
        }
        spin_unlock(&qp->s_lock);

        hfi1_put_txreq(tx);
}
static int wait_kmem(struct hfi1_ibdev *dev, struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                write_seqlock(&dev->iowait_lock);
                if (list_empty(&priv->s_iowait.list)) {
                        if (list_empty(&dev->memwait))
                                mod_timer(&dev->mem_timer, jiffies + 1);
                        qp->s_flags |= RVT_S_WAIT_KMEM;
                        list_add_tail(&priv->s_iowait.list, &dev->memwait);
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
                        atomic_inc(&qp->refcount);
                }
                write_sequnlock(&dev->iowait_lock);
                qp->s_flags &= ~RVT_S_BUSY;
                ret = -EBUSY;
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);

        return ret;
}
/*
 * This routine calls txadds for each sg entry.
 *
 * Add failures will revert the sge cursor.
 */
static int build_verbs_ulp_payload(
        struct sdma_engine *sde,
        struct rvt_sge_state *ss,
        u32 length,
        struct verbs_txreq *tx)
{
        struct rvt_sge *sg_list = ss->sg_list;
        struct rvt_sge sge = ss->sge;
        u8 num_sge = ss->num_sge;
        u32 len;
        int ret = 0;

        while (length) {
                len = ss->sge.length;
                if (len > length)
                        len = length;
                if (len > ss->sge.sge_length)
                        len = ss->sge.sge_length;
                WARN_ON_ONCE(len == 0);
                ret = sdma_txadd_kvaddr(
                        sde->dd,
                        &tx->txreq,
                        ss->sge.vaddr,
                        len);
                if (ret)
                        goto bail_txadd;
                update_sge(ss, len);
                length -= len;
        }
        return ret;

bail_txadd:
        /* unwind cursor */
        ss->sge = sge;
        ss->num_sge = num_sge;
        ss->sg_list = sg_list;
        return ret;
}
/*
 * Build the number of DMA descriptors needed to send length bytes of data.
 *
 * NOTE: DMA mapping is held in the tx until completed in the ring or
 *       the tx desc is freed without having been submitted to the ring.
 *
 * This routine ensures that all the helper routine calls succeed.
 */
static int build_verbs_tx_desc(
        struct sdma_engine *sde,
        struct rvt_sge_state *ss,
        u32 length,
        struct verbs_txreq *tx,
        struct ahg_ib_header *ahdr,
        u64 pbc)
{
        int ret = 0;
        struct hfi1_pio_header *phdr = &tx->phdr;
        u16 hdrbytes = tx->hdr_dwords << 2;

        if (!ahdr->ahgcount) {
                ret = sdma_txinit_ahg(
                        &tx->txreq,
                        ahdr->tx_flags,
                        hdrbytes + length,
                        ahdr->ahgidx,
                        0,
                        NULL,
                        0,
                        verbs_sdma_complete);
                if (ret)
                        goto bail_txadd;
                phdr->pbc = cpu_to_le64(pbc);
                memcpy(&phdr->hdr, &ahdr->ibh, hdrbytes - sizeof(phdr->pbc));
                /* add the header */
                ret = sdma_txadd_kvaddr(
                        sde->dd,
                        &tx->txreq,
                        phdr,
                        tx->hdr_dwords << 2);
                if (ret)
                        goto bail_txadd;
        } else {
                struct hfi1_other_headers *sohdr = &ahdr->ibh.u.oth;
                struct hfi1_other_headers *dohdr = &phdr->hdr.u.oth;

                /* needed in rc_send_complete() */
                phdr->hdr.lrh[0] = ahdr->ibh.lrh[0];
                if ((be16_to_cpu(phdr->hdr.lrh[0]) & 3) == HFI1_LRH_GRH) {
                        sohdr = &ahdr->ibh.u.l.oth;
                        dohdr = &phdr->hdr.u.l.oth;
                }
                /* opcode */
                dohdr->bth[0] = sohdr->bth[0];
                /* PSN/ACK */
                dohdr->bth[2] = sohdr->bth[2];
                ret = sdma_txinit_ahg(
                        &tx->txreq,
                        ahdr->tx_flags,
                        length,
                        ahdr->ahgidx,
                        ahdr->ahgcount,
                        ahdr->ahgdesc,
                        hdrbytes,
                        verbs_sdma_complete);
                if (ret)
                        goto bail_txadd;
        }

        /* add the ulp payload - if any.  ss can be NULL for acks */
        if (ss)
                ret = build_verbs_ulp_payload(sde, ss, length, tx);
bail_txadd:
        return ret;
}
int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                        u64 pbc)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct ahg_ib_header *ahdr = priv->s_hdr;
        u32 hdrwords = qp->s_hdrwords;
        struct rvt_sge_state *ss = qp->s_cur_sge;
        u32 len = qp->s_cur_size;
        u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
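        /*
         * Worked example of the plen arithmetic above: a 14-dword header
         * and a 9-byte payload give 14 + ((9 + 3) >> 2) + 2 =
         * 14 + 3 + 2 = 19 dwords on the wire; the payload is rounded up
         * to whole dwords and 2 dwords cover the 8-byte PBC.
         */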
        struct hfi1_ibdev *dev = ps->dev;
        struct hfi1_pportdata *ppd = ps->ppd;
        struct verbs_txreq *tx;
        struct sdma_txreq *stx;
        u64 pbc_flags = 0;
        u8 sc5 = priv->s_sc;
        int ret;

        if (!list_empty(&priv->s_iowait.tx_head)) {
                stx = list_first_entry(
                        &priv->s_iowait.tx_head,
                        struct sdma_txreq,
                        list);
                list_del_init(&stx->list);
                tx = container_of(stx, struct verbs_txreq, txreq);
                ret = sdma_send_txreq(tx->sde, &priv->s_iowait, stx);
                if (unlikely(ret == -ECOMM))
                        goto bail_ecomm;
                return ret;
        }

        tx = get_txreq(dev, qp);
        if (IS_ERR(tx))
                return PTR_ERR(tx);

        tx->sde = priv->s_sde;

        if (likely(pbc == 0)) {
                u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

                /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
                pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
                pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
        }
        tx->wqe = qp->s_wqe;
        tx->mr = qp->s_rdma_mr;
        if (qp->s_rdma_mr)
                qp->s_rdma_mr = NULL;
        tx->hdr_dwords = hdrwords + 2;
        ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
        if (unlikely(ret))
                goto bail_build;
        trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);
        ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq);
        if (unlikely(ret == -ECOMM))
                goto bail_ecomm;
        return ret;

bail_ecomm:
        /* The current one got "sent" */
        return 0;
bail_build:
        /* kmalloc or mapping fail */
        hfi1_put_txreq(tx);
        return wait_kmem(dev, qp);
}
/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct rvt_qp *qp, struct send_context *sc)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_devdata *dd = sc->dd;
        struct hfi1_ibdev *dev = &dd->verbs_dev;
        unsigned long flags;
        int ret = 0;

        /*
         * Note that as soon as want_buffer() is called and
         * possibly before it returns, sc_piobufavail()
         * could be called.  Therefore, put QP on the I/O wait list before
         * enabling the PIO avail interrupt.
         */
        spin_lock_irqsave(&qp->s_lock, flags);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                write_seqlock(&dev->iowait_lock);
                if (list_empty(&priv->s_iowait.list)) {
                        int was_empty;

                        qp->s_flags |= RVT_S_WAIT_PIO;
                        was_empty = list_empty(&sc->piowait);
                        list_add_tail(&priv->s_iowait.list, &sc->piowait);
                        trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
                        atomic_inc(&qp->refcount);
                        /* counting: only call wantpiobuf_intr if first user */
                        if (was_empty)
                                hfi1_sc_wantpiobuf_intr(sc, 1);
                }
                write_sequnlock(&dev->iowait_lock);
                qp->s_flags &= ~RVT_S_BUSY;
                ret = -EBUSY;
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct hfi1_pportdata *ppd = dd->pport + (qp->port_num - 1);
        u8 vl;

        vl = sc_to_vlt(dd, sc5);
        if (vl >= ppd->vls_supported && vl != 15)
                return NULL;
        return dd->vld[vl].sc;
}
int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                        u64 pbc)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct ahg_ib_header *ahdr = priv->s_hdr;
        u32 hdrwords = qp->s_hdrwords;
        struct rvt_sge_state *ss = qp->s_cur_sge;
        u32 len = qp->s_cur_size;
        u32 dwords = (len + 3) >> 2;
        u32 plen = hdrwords + dwords + 2; /* includes pbc */
        struct hfi1_pportdata *ppd = ps->ppd;
        u32 *hdr = (u32 *)&ahdr->ibh;
        u64 pbc_flags = 0;
        u8 sc5;
        unsigned long flags = 0;
        struct send_context *sc;
        struct pio_buf *pbuf;
        int wc_status = IB_WC_SUCCESS;

        /* vl15 special case taken care of in ud.c */
        sc5 = priv->s_sc;
        sc = qp_to_send_context(qp, sc5);
        if (!sc)
                return -EINVAL;

        if (likely(pbc == 0)) {
                u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

                /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
                pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
                pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
        }
        pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
        if (unlikely(pbuf == NULL)) {
                if (ppd->host_link_state != HLS_UP_ACTIVE) {
                        /*
                         * If we have filled the PIO buffers to capacity and
                         * are not in an active state, this request is not
                         * going to go out; just complete it with an error,
                         * or else a ULP or the core may be stuck waiting.
                         */
                        hfi1_cdbg(
                                PIO,
                                "alloc failed. state not active, completing");
                        wc_status = IB_WC_GENERAL_ERR;
                        goto pio_bail;
                } else {
                        /*
                         * This is a normal occurrence.  The PIO buffers are
                         * full, but we are still happily sending; let's
                         * continue to queue the request.
                         */
                        hfi1_cdbg(PIO, "alloc failed. state active, queuing");
                        return no_bufs_available(qp, sc);
                }
        }

        if (len == 0) {
                pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
        } else {
                if (ss) {
                        seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4);
                        while (len) {
                                void *addr = ss->sge.vaddr;
                                u32 slen = ss->sge.length;

                                if (slen > len)
                                        slen = len;
                                update_sge(ss, slen);
                                seg_pio_copy_mid(pbuf, addr, slen);
                                len -= slen;
                        }
                        seg_pio_copy_end(pbuf);
                }
        }

        trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);

        if (qp->s_rdma_mr) {
                rvt_put_mr(qp->s_rdma_mr);
                qp->s_rdma_mr = NULL;
        }

pio_bail:
        if (qp->s_wqe) {
                spin_lock_irqsave(&qp->s_lock, flags);
                hfi1_send_complete(qp, qp->s_wqe, wc_status);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        } else if (qp->ibqp.qp_type == IB_QPT_RC) {
                spin_lock_irqsave(&qp->s_lock, flags);
                hfi1_rc_send_complete(qp, &ahdr->ibh);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        }
        return 0;
}
/*
 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the ingress partition key table), return 0
 * otherwise.  Use the matching criteria for egress partition keys
 * specified in the OPAv1 spec., section 9.11.7.
 */
static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
{
        u16 mkey = pkey & PKEY_LOW_15_MASK;
        u16 ment = ent & PKEY_LOW_15_MASK;

        if (mkey == ment) {
                /*
                 * If pkey[15] is set (full partition member),
                 * is bit 15 in the corresponding table element
                 * clear (limited member)?
                 */
                if (pkey & PKEY_MEMBER_MASK)
                        return !!(ent & PKEY_MEMBER_MASK);
                return 1;
        }
        return 0;
}
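
/*
 * Worked example of the matching rule above: with pkey 0x8001 (full
 * member of partition 1), table entry 0x0001 (limited member) fails
 * the check and entry 0x8001 (full member) passes; with the
 * limited-member pkey 0x0001, both 0x0001 and 0x8001 pass, since the
 * membership test only applies when the checked pkey has its high bit
 * set.
 */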
/*
 * egress_pkey_check - return 0 if hdr's pkey matches according to the
 * criteria in the OPAv1 spec., section 9.11.7.
 */
static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
                                    struct hfi1_ib_header *hdr,
                                    struct rvt_qp *qp)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_other_headers *ohdr;
        struct hfi1_devdata *dd;
        int i = 0;
        u16 pkey;
        u8 lnh, sc5 = priv->s_sc;

        if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
                return 0;

        /* locate the pkey within the headers */
        lnh = be16_to_cpu(hdr->lrh[0]) & 3;
        if (lnh == HFI1_LRH_GRH)
                ohdr = &hdr->u.l.oth;
        else
                ohdr = &hdr->u.oth;

        pkey = (u16)be32_to_cpu(ohdr->bth[0]);

        /* If SC15, pkey[0:14] must be 0x7fff */
        if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
                goto bad;

        /* Is the pkey = 0x0, or 0x8000? */
        if ((pkey & PKEY_LOW_15_MASK) == 0)
                goto bad;

        /* The most likely matching pkey has index qp->s_pkey_index */
        if (unlikely(!egress_pkey_matches_entry(pkey,
                                                ppd->pkeys
                                                [qp->s_pkey_index]))) {
                /* no match - try the entire table */
                for (; i < MAX_PKEY_VALUES; i++) {
                        if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
                                break;
                }
        }

        if (i < MAX_PKEY_VALUES)
                return 0;
bad:
        incr_cntr64(&ppd->port_xmit_constraint_errors);
        dd = ppd->dd;
        if (!(dd->err_info_xmit_constraint.status & OPA_EI_STATUS_SMASK)) {
                u16 slid = be16_to_cpu(hdr->lrh[3]);

                dd->err_info_xmit_constraint.status |= OPA_EI_STATUS_SMASK;
                dd->err_info_xmit_constraint.slid = slid;
                dd->err_info_xmit_constraint.pkey = pkey;
        }
        return 1;
}
/**
 * hfi1_verbs_send - send a packet
 * @qp: the QP to send on
 * @ps: the state of the packet to send
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
        struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct hfi1_qp_priv *priv = qp->priv;
        struct ahg_ib_header *ahdr = priv->s_hdr;
        int ret;
        int pio = 0;
        unsigned long flags = 0;

        /*
         * VL15 packets (IB_QPT_SMI) will always use PIO, so we
         * can defer SDMA restart until link goes ACTIVE without
         * worrying about just how we got there.
         */
        if ((qp->ibqp.qp_type == IB_QPT_SMI) ||
            !(dd->flags & HFI1_HAS_SEND_DMA))
                pio = 1;

        ret = egress_pkey_check(dd->pport, &ahdr->ibh, qp);
        if (unlikely(ret)) {
                /*
                 * The value we are returning here does not get propagated to
                 * the verbs caller.  Thus we need to complete the request
                 * with error, otherwise the caller could be sitting waiting
                 * on the completion event.  Only do this for PIO; SDMA has
                 * its own mechanism for handling the errors, so for SDMA we
                 * can just return.
                 */
                if (pio) {
                        hfi1_cdbg(PIO, "%s() Failed. Completing with err",
                                  __func__);
                        spin_lock_irqsave(&qp->s_lock, flags);
                        hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
                        spin_unlock_irqrestore(&qp->s_lock, flags);
                }
                return -EINVAL;
        }

        if (pio) {
                ret = dd->process_pio_send(qp, ps, 0);
        } else {
#ifdef CONFIG_SDMA_VERBOSITY
                dd_dev_err(dd, "CONFIG SDMA %s:%d %s()\n",
                           slashstrip(__FILE__), __LINE__, __func__);
                dd_dev_err(dd, "SDMA hdrwords = %u, len = %u\n",
                           qp->s_hdrwords, qp->s_cur_size);
#endif
                ret = dd->process_dma_send(qp, ps, 0);
        }

        return ret;
}
/**
 * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
{
        struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

        memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
        rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
                IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
                IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
                IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
        rdi->dparms.props.page_size_cap = PAGE_SIZE;
        rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
        rdi->dparms.props.vendor_part_id = dd->pcidev->device;
        rdi->dparms.props.hw_ver = dd->minrev;
        rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
        rdi->dparms.props.max_mr_size = ~0ULL;
        rdi->dparms.props.max_qp = hfi1_max_qps;
        rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
        rdi->dparms.props.max_sge = hfi1_max_sges;
        rdi->dparms.props.max_sge_rd = hfi1_max_sges;
        rdi->dparms.props.max_cq = hfi1_max_cqs;
        rdi->dparms.props.max_ah = hfi1_max_ahs;
        rdi->dparms.props.max_cqe = hfi1_max_cqes;
        rdi->dparms.props.max_mr = rdi->lkey_table.max;
        rdi->dparms.props.max_fmr = rdi->lkey_table.max;
        rdi->dparms.props.max_map_per_fmr = 32767;
        rdi->dparms.props.max_pd = hfi1_max_pds;
        rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
        rdi->dparms.props.max_qp_init_rd_atom = 255;
        rdi->dparms.props.max_srq = hfi1_max_srqs;
        rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
        rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
        rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
        rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
        rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
        rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
        rdi->dparms.props.max_total_mcast_qp_attach =
                                        rdi->dparms.props.max_mcast_qp_attach *
                                        rdi->dparms.props.max_mcast_grp;
}
static inline u16 opa_speed_to_ib(u16 in)
{
        u16 out = 0;

        if (in & OPA_LINK_SPEED_25G)
                out |= IB_SPEED_EDR;
        if (in & OPA_LINK_SPEED_12_5G)
                out |= IB_SPEED_FDR;

        return out;
}
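
/*
 * Example mappings: an active link speed of OPA_LINK_SPEED_25G is
 * reported to the IB core as EDR and OPA_LINK_SPEED_12_5G as FDR, the
 * closest IB enums; a zero input (link down) simply yields 0.
 */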
/*
 * Convert a single OPA link width (no multiple flags) to an IB value.
 * A zero OPA link width means link down, which means the IB width value
 * is a don't care.
 */
static inline u16 opa_width_to_ib(u16 in)
{
        switch (in) {
        case OPA_LINK_WIDTH_1X:
        /* map 2x and 3x to 1x as they don't exist in IB */
        case OPA_LINK_WIDTH_2X:
        case OPA_LINK_WIDTH_3X:
                return IB_WIDTH_1X;
        default: /* link down or unknown, return our largest width */
        case OPA_LINK_WIDTH_4X:
                return IB_WIDTH_4X;
        }
}
static int query_port(struct rvt_dev_info *rdi, u8 port_num,
                      struct ib_port_attr *props)
{
        struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
        struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
        struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
        u16 lid = ppd->lid;

        props->lid = lid ? lid : 0;
        props->lmc = ppd->lmc;
        /* OPA logical states match IB logical states */
        props->state = driver_lstate(ppd);
        props->phys_state = hfi1_ibphys_portstate(ppd);
        props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
        props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
        /* see rate_show() in ib core/sysfs.c */
        props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
        props->max_vl_num = ppd->vls_supported;

        /* Once we are a "first class" citizen and have added the OPA MTUs to
         * the core we can advertise the larger MTU enum to the ULPs; for now
         * advertise only 4K.
         *
         * Those applications which are either OPA aware or pass the MTU enum
         * from the Path Records to us will get the new 8k MTU.  Those that
         * attempt to process the MTU enum may fail in various ways.
         */
        props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
                                      4096 : hfi1_max_mtu), IB_MTU_4096);
        props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
                mtu_to_enum(ppd->ibmtu, IB_MTU_2048);

        return 0;
}
static int modify_device(struct ib_device *device,
                         int device_modify_mask,
                         struct ib_device_modify *device_modify)
{
        struct hfi1_devdata *dd = dd_from_ibdev(device);
        unsigned i;
        int ret;

        if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
                                   IB_DEVICE_MODIFY_NODE_DESC)) {
                ret = -EOPNOTSUPP;
                goto bail;
        }

        if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
                memcpy(device->node_desc, device_modify->node_desc, 64);
                for (i = 0; i < dd->num_pports; i++) {
                        struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

                        hfi1_node_desc_chg(ibp);
                }
        }

        if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
                ib_hfi1_sys_image_guid =
                        cpu_to_be64(device_modify->sys_image_guid);
                for (i = 0; i < dd->num_pports; i++) {
                        struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

                        hfi1_sys_guid_chg(ibp);
                }
        }

        ret = 0;

bail:
        return ret;
}
static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
        struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
        struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
        struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
        int ret;

        set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
                             OPA_LINKDOWN_REASON_UNKNOWN);
        ret = set_link_state(ppd, HLS_DN_DOWNDEF);
        return ret;
}
static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
                            int guid_index, __be64 *guid)
{
        struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

        if (guid_index == 0)
                *guid = cpu_to_be64(ppd->guid);
        else if (guid_index < HFI1_GUIDS_PER_PORT)
                *guid = ibp->guids[guid_index - 1];
        else
                return -EINVAL;

        return 0;
}
/*
 * convert ah port,sl to sc
 */
u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah)
{
        struct hfi1_ibport *ibp = to_iport(ibdev, ah->port_num);

        return ibp->sl_to_sc[ah->sl];
}
static int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
        struct hfi1_ibport *ibp;
        struct hfi1_pportdata *ppd;
        struct hfi1_devdata *dd;
        u8 sc5;

        /* test the mapping for validity */
        ibp = to_iport(ibdev, ah_attr->port_num);
        ppd = ppd_from_ibp(ibp);
        sc5 = ibp->sl_to_sc[ah_attr->sl];
        dd = dd_from_ppd(ppd);
        if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
                return -EINVAL;
        return 0;
}
static void hfi1_notify_new_ah(struct ib_device *ibdev,
                               struct ib_ah_attr *ah_attr,
                               struct rvt_ah *ah)
{
        struct hfi1_ibport *ibp;
        struct hfi1_pportdata *ppd;
        struct hfi1_devdata *dd;
        u8 sc5;

        /*
         * Do not trust reading anything from rvt_ah at this point as it is
         * not done being set up.  We can however modify things which we need
         * to set.
         */
        ibp = to_iport(ibdev, ah_attr->port_num);
        ppd = ppd_from_ibp(ibp);
        sc5 = ibp->sl_to_sc[ah->attr.sl];
        dd = dd_from_ppd(ppd);
        ah->vl = sc_to_vlt(dd, sc5);
        if (ah->vl < num_vls || ah->vl == 15)
                ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
}
struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
{
        struct ib_ah_attr attr;
        struct ib_ah *ah = ERR_PTR(-EINVAL);
        struct rvt_qp *qp0;

        memset(&attr, 0, sizeof(attr));
        attr.dlid = dlid;
        attr.port_num = ppd_from_ibp(ibp)->port;
        rcu_read_lock();
        qp0 = rcu_dereference(ibp->rvp.qp[0]);
        if (qp0)
                ah = ib_create_ah(qp0->ibqp.pd, &attr);
        rcu_read_unlock();
        return ah;
}
/**
 * hfi1_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the hfi1_ib device
 */
unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
{
        return ARRAY_SIZE(dd->pport[0].pkeys);
}
static void init_ibport(struct hfi1_pportdata *ppd)
{
        struct hfi1_ibport *ibp = &ppd->ibport_data;
        size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
        int i;

        for (i = 0; i < sz; i++) {
                ibp->sl_to_sc[i] = i;
                ibp->sc_to_sl[i] = i;
        }

        spin_lock_init(&ibp->rvp.lock);
        /* Set the prefix to the default value (see ch. 4.1.1) */
        ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
        ibp->rvp.sm_lid = 0;
        /* Below should only set bits defined in OPA PortInfo.CapabilityMask */
        ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
                IB_PORT_CAP_MASK_NOTICE_SUP;
        ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
        ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
        ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
        ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
        ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

        RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
        RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}
static void verbs_txreq_kmem_cache_ctor(void *obj)
{
        struct verbs_txreq *tx = obj;

        memset(tx, 0, sizeof(*tx));
}
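
/*
 * Note on the constructor above: a kmem_cache constructor runs once
 * per object when the slab allocator populates a new page, not on
 * every kmem_cache_alloc().  Each verbs_txreq therefore starts out
 * zeroed, but is not re-zeroed between allocations; users must
 * reinitialize whatever fields they depend on.
 */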
/**
 * hfi1_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 if successful, errno if unsuccessful.
 */
int hfi1_register_ib_device(struct hfi1_devdata *dd)
{
        struct hfi1_ibdev *dev = &dd->verbs_dev;
        struct ib_device *ibdev = &dev->rdi.ibdev;
        struct hfi1_pportdata *ppd = dd->pport;
        unsigned i;
        int ret;
        size_t lcpysz = IB_DEVICE_NAME_MAX;
        u16 descq_cnt;
        char buf[TXREQ_NAME_LEN];

        for (i = 0; i < dd->num_pports; i++)
                init_ibport(ppd + i);

        /* Only need to initialize non-zero fields. */

        init_timer(&dev->mem_timer);
        dev->mem_timer.function = mem_timer;
        dev->mem_timer.data = (unsigned long)dev;

        seqlock_init(&dev->iowait_lock);
        INIT_LIST_HEAD(&dev->txwait);
        INIT_LIST_HEAD(&dev->memwait);

        descq_cnt = sdma_get_descq_cnt();

        snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
        /* SLAB_HWCACHE_ALIGN for AHG */
        dev->verbs_txreq_cache = kmem_cache_create(buf,
                                                   sizeof(struct verbs_txreq),
                                                   0, SLAB_HWCACHE_ALIGN,
                                                   verbs_txreq_kmem_cache_ctor);
        if (!dev->verbs_txreq_cache) {
                ret = -ENOMEM;
                goto err_verbs_txreq;
        }

        /*
         * The system image GUID is supposed to be the same for all
         * HFIs in a single system but since there can be other
         * device types in the system, we can't be sure this is unique.
         */
        if (!ib_hfi1_sys_image_guid)
                ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
        lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
        strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
        ibdev->owner = THIS_MODULE;
        ibdev->node_guid = cpu_to_be64(ppd->guid);
        ibdev->phys_port_cnt = dd->num_pports;
        ibdev->dma_device = &dd->pcidev->dev;
        ibdev->modify_device = modify_device;

        /* keep process mad in the driver */
        ibdev->process_mad = hfi1_process_mad;

        strncpy(ibdev->node_desc, init_utsname()->nodename,
                sizeof(ibdev->node_desc));

        /*
         * Fill in rvt info object.
         */
        dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
        dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
        dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
        dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
        dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
        dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
        dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
        dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
        dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
        /*
         * Fill in rvt info device attributes.
         */
        hfi1_fill_device_attr(dd);

        /* queue pair */
        dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
        dd->verbs_dev.rdi.dparms.qpn_start = 0;
        dd->verbs_dev.rdi.dparms.qpn_inc = 1;
        dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
        dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
        dd->verbs_dev.rdi.dparms.qpn_res_end =
                dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
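        /*
         * Example (assuming the default kdeth_qp prefix of 0x80): QPNs
         * 0x800000 through 0x80ffff would be reserved, a 64K block set
         * aside so verbs QPs never collide with KDETH (PSM) traffic.
         */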
        dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
        dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
        dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
        dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
        dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
        dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;

        dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
        dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
        dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
        dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
        dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
        dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
        dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
        dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
        dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
        dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
        dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
        dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
        dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
        dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
        dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
        dd->verbs_dev.rdi.driver_f.check_send_wr = hfi1_check_send_wr;

        /* completion queue */
        snprintf(dd->verbs_dev.rdi.dparms.cq_name,
                 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
                 "hfi1_cq%d", dd->unit);
        dd->verbs_dev.rdi.dparms.node = dd->node;

        /* misc settings */
        dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
        dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
        dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
        dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);

        ppd = dd->pport;
        for (i = 0; i < dd->num_pports; i++, ppd++)
                rvt_init_port(&dd->verbs_dev.rdi,
                              &ppd->ibport_data.rvp,
                              i,
                              ppd->pkeys);

        ret = rvt_register_device(&dd->verbs_dev.rdi);
        if (ret)
                goto err_verbs_txreq;

        ret = hfi1_verbs_register_sysfs(dd);
        if (ret)
                goto err_class;

        return ret;

err_class:
        rvt_unregister_device(&dd->verbs_dev.rdi);
err_verbs_txreq:
        kmem_cache_destroy(dev->verbs_txreq_cache);
        dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
        return ret;
}
void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
{
        struct hfi1_ibdev *dev = &dd->verbs_dev;

        hfi1_verbs_unregister_sysfs(dd);

        rvt_unregister_device(&dd->verbs_dev.rdi);

        if (!list_empty(&dev->txwait))
                dd_dev_err(dd, "txwait list not empty!\n");
        if (!list_empty(&dev->memwait))
                dd_dev_err(dd, "memwait list not empty!\n");

        del_timer_sync(&dev->mem_timer);
        kmem_cache_destroy(dev->verbs_txreq_cache);
}
void hfi1_cnp_rcv(struct hfi1_packet *packet)
{
        struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        struct hfi1_ib_header *hdr = packet->hdr;
        struct rvt_qp *qp = packet->qp;
        u32 lqpn, rqpn = 0;
        u16 rlid = 0;
        u8 sl, sc5, sc4_bit, svc_type;
        bool sc4_set = has_sc4_bit(packet);

        switch (packet->qp->ibqp.qp_type) {
        case IB_QPT_UC:
                rlid = qp->remote_ah_attr.dlid;
                rqpn = qp->remote_qpn;
                svc_type = IB_CC_SVCTYPE_UC;
                break;
        case IB_QPT_RC:
                rlid = qp->remote_ah_attr.dlid;
                rqpn = qp->remote_qpn;
                svc_type = IB_CC_SVCTYPE_RC;
                break;
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
                svc_type = IB_CC_SVCTYPE_UD;
                break;
        default:
                ibp->rvp.n_pkt_drops++;
                return;
        }

        sc4_bit = sc4_set << 4;
        sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
        sc5 |= sc4_bit;
        sl = ibp->sc_to_sl[sc5];
        lqpn = qp->ibqp.qp_num;

        process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}