/*
 * cxgb4_uld.c:Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)
static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}

	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}
static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}
/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
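/*
 * Illustrative only (not part of the driver): a minimal sketch of the
 * rx_handler contract as seen from the ULD side, matching how uldrx_handler
 * above invokes it.  The my_uld_* names and types are hypothetical; the only
 * assumptions taken from this file are the (handle, rsp, gl) arguments, that
 * gl may be NULL (immediate data) or CXGB4_MSG_AN (asynchronous notification),
 * and that a non-zero return is counted as a delivery failure (stats.nomem).
 *
 *	static int my_uld_rx_handler(void *handle, const __be64 *rsp,
 *				     const struct pkt_gl *gl)
 *	{
 *		struct my_uld_dev *dev = handle;	// hypothetical ULD state
 *
 *		if (gl == CXGB4_MSG_AN)
 *			return my_uld_handle_async(dev, rsp);	// hypothetical
 *		if (!gl)
 *			return my_uld_handle_imm(dev, rsp);	// hypothetical
 *		return my_uld_handle_pkt(dev, rsp, gl);		// hypothetical
 *	}
 */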
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	struct sge *s = &adap->sge;
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	unsigned int bmap_idx = 0;
	unsigned int per_chan;
	int i, err, msi_idx, que_idx = 0;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}
static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}
static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & USING_MSIX)
		kfree(rxq_info->msix_tbl);
}
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}
static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;
unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}
static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}
static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler)
		napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}
static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}
static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}
static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;
	if (uld_type == CXGB4_ULD_CRYPTO) {
		i = min_t(int, adap->vres.ncrypto_fc,
			  num_online_cpus());
		txq_info->ntxq = rounddown(i, adap->params.nports);
		if (txq_info->ntxq <= 0) {
			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
			kfree(txq_info);
			return -EINVAL;
		}

	} else {
		i = min_t(int, uld_info->ntxq, num_online_cpus());
		txq_info->ntxq = roundup(i, adap->params.nports);
	}
	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}
static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int tx_uld_type = TX_ULD(uld_type);
	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
	lli->ntxq = txq_info->ntxq;
}
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}
void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}
/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}
void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	unsigned int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return;
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}
/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.
 */
void cxgb4_register_uld(enum cxgb4_uld type,
			const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add)
			goto free_irq;
		ret = setup_sge_txq_uld(adap, type, p);
		if (ret)
			goto free_irq;
		adap->uld[type] = *p;
		uld_attach(adap, type);
		continue;
free_irq:
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
free_rxq:
		free_sge_queues_uld(adap, type);
free_queues:
		free_queues_uld(adap, type);
out:
		dev_warn(adap->pdev_dev,
			 "ULD registration failed for uld type %d\n", type);
	}
	mutex_unlock(&uld_mutex);
}
EXPORT_SYMBOL(cxgb4_register_uld);
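
/*
 * Illustrative only (not part of the driver): a minimal sketch of how an
 * upper-layer driver might register itself, using only cxgb4_uld_info fields
 * referenced in this file (name, nrxq, ntxq, rxq_size, lro, add, rx_handler,
 * state_change).  The my_uld_* names are hypothetical and the queue counts
 * are arbitrary placeholders.
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.name = "my_uld",			// hypothetical ULD name
 *		.nrxq = 16,				// requested ingress queues
 *		.ntxq = 16,				// requested egress queues
 *		.rxq_size = 1024,			// response queue size
 *		.lro = false,				// no LRO aggregation
 *		.add = my_uld_add,			// hypothetical callbacks
 *		.rx_handler = my_uld_rx_handler,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	static int __init my_uld_init_module(void)
 *	{
 *		// e.g. an RDMA-class ULD; registration walks adapter_list
 *		cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *		return 0;
 *	}
 */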
/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
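
/*
 * Illustrative only (not part of the driver): the matching teardown for the
 * registration sketch above, typically run from the ULD's module exit path.
 * The my_uld_* name is hypothetical.
 *
 *	static void __exit my_uld_exit_module(void)
 *	{
 *		cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 *	}
 */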