cxgb4/chcr: Register to tls add and del callback
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
        struct adapter *adap = q->adap;

        if (adap->uld[q->uld].lro_flush)
                adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct adapter *adap = q->adap;
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
        int ret;

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (q->flush_handler)
                ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
                                rsp, gl, &q->lro_mgr,
                                &q->napi);
        else
                ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
                                rsp, gl);

        if (ret) {
                rxq->stats.nomem++;
                return -1;
        }

        if (!gl)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

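/* Allocate the ULD ingress queues (regular rxqs followed by any concentrator
 * queues), spreading them across the adapter's ports.  With MSI-X enabled an
 * interrupt index is taken from the bitmap for each queue; on failure,
 * everything allocated so far is torn down again.
 */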
static int alloc_uld_rxqs(struct adapter *adap,
                          struct sge_uld_rxq_info *rxq_info, bool lro)
{
        unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
        struct sge_ofld_rxq *q = rxq_info->uldrxq;
        unsigned short *ids = rxq_info->rspq_id;
        int i, err, msi_idx, que_idx = 0;
        struct sge *s = &adap->sge;
        unsigned int per_chan;

        per_chan = rxq_info->nrxq / adap->params.nports;

        if (adap->flags & CXGB4_USING_MSIX)
                msi_idx = 1;
        else
                msi_idx = -((int)s->intrq.abs_id + 1);

        for (i = 0; i < nq; i++, q++) {
                if (i == rxq_info->nrxq) {
                        /* start allocation of concentrator queues */
                        per_chan = rxq_info->nciq / adap->params.nports;
                        que_idx = 0;
                }

                if (msi_idx >= 0) {
                        msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
                        if (msi_idx < 0) {
                                err = -ENOSPC;
                                goto freeout;
                        }

                        snprintf(adap->msix_info[msi_idx].desc,
                                 sizeof(adap->msix_info[msi_idx].desc),
                                 "%s-%s%d",
                                 adap->port[0]->name, rxq_info->name, i);

                        q->msix = &adap->msix_info[msi_idx];
                }
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
                                       adap->port[que_idx++ / per_chan],
                                       msi_idx,
                                       q->fl.size ? &q->fl : NULL,
                                       uldrx_handler,
                                       lro ? uldrx_flush_handler : NULL,
                                       0);
                if (err)
                        goto freeout;

                memset(&q->stats, 0, sizeof(q->stats));
                if (ids)
                        ids[i] = q->rspq.abs_id;
        }
        return 0;
freeout:
        q = rxq_info->uldrxq;
        for ( ; i; i--, q++) {
                if (q->rspq.desc)
                        free_rspq_fl(adap, &q->rspq,
                                     q->fl.size ? &q->fl : NULL);
                if (q->msix)
                        cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
        }
        return err;
}

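/* Allocate the rx queues for a ULD and, for RDMA on a fully initialized
 * adapter, tell the firmware to route control queue completions to the
 * per-port RDMA response queues.
 */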
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int i, ret = 0;

        ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

        /* Tell uP to route control queue completions to rdma rspq */
        if (adap->flags & CXGB4_FULL_INIT_DONE &&
            !ret && uld_type == CXGB4_ULD_RDMA) {
                struct sge *s = &adap->sge;
                unsigned int cmplqid;
                u32 param, cmdop;

                cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
                for_each_port(adap, i) {
                        cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
                        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                                 FW_PARAMS_PARAM_X_V(cmdop) |
                                 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
                        ret = t4_set_params(adap, adap->mbox, adap->pf,
                                            0, 1, &param, &cmplqid);
                }
        }
        return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
                             struct sge_ofld_rxq *q)
{
        for ( ; n; n--, q++) {
                if (q->rspq.desc)
                        free_rspq_fl(adap, &q->rspq,
                                     q->fl.size ? &q->fl : NULL);
        }
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
                struct sge *s = &adap->sge;
                u32 param, cmdop, cmplqid = 0;
                int i;

                cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
                for_each_port(adap, i) {
                        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                                 FW_PARAMS_PARAM_X_V(cmdop) |
                                 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
                        t4_set_params(adap, adap->mbox, adap->pf,
                                      0, 1, &param, &cmplqid);
                }
        }

        if (rxq_info->nciq)
                t4_free_uld_rxqs(adap, rxq_info->nciq,
                                 rxq_info->uldrxq + rxq_info->nrxq);
        t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
}

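/* Size and allocate the per-ULD rx queue bookkeeping: decide how many regular
 * rxqs and concentrator queues (CIQs) to use based on MSI-X availability,
 * online CPUs and port count, then initialize each response queue and free
 * list with default parameters.
 */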
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
                          const struct cxgb4_uld_info *uld_info)
{
        struct sge *s = &adap->sge;
        struct sge_uld_rxq_info *rxq_info;
        int i, nrxq, ciq_size;

        rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
        if (!rxq_info)
                return -ENOMEM;

        if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
                i = s->nqs_per_uld;
                rxq_info->nrxq = roundup(i, adap->params.nports);
        } else {
                i = min_t(int, uld_info->nrxq,
                          num_online_cpus());
                rxq_info->nrxq = roundup(i, adap->params.nports);
        }
        if (!uld_info->ciq) {
                rxq_info->nciq = 0;
        } else {
                if (adap->flags & CXGB4_USING_MSIX)
                        rxq_info->nciq = min_t(int, s->nqs_per_uld,
                                               num_online_cpus());
                else
                        rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
                                               num_online_cpus());
                rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
                                  adap->params.nports);
                rxq_info->nciq = max_t(int, rxq_info->nciq,
                                       adap->params.nports);
        }

        nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
        rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
                                   GFP_KERNEL);
        if (!rxq_info->uldrxq) {
                kfree(rxq_info);
                return -ENOMEM;
        }

        rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
        if (!rxq_info->rspq_id) {
                kfree(rxq_info->uldrxq);
                kfree(rxq_info);
                return -ENOMEM;
        }

        for (i = 0; i < rxq_info->nrxq; i++) {
                struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

                init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
                r->rspq.uld = uld_type;
                r->fl.size = 72;
        }

        ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
        if (ciq_size > SGE_MAX_IQ_SIZE) {
                dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
                ciq_size = SGE_MAX_IQ_SIZE;
        }

        for (i = rxq_info->nrxq; i < nrxq; i++) {
                struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

                init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
                r->rspq.uld = uld_type;
        }

        memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
        adap->sge.uld_rxq_info[uld_type] = rxq_info;

        return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

        adap->sge.uld_rxq_info[uld_type] = NULL;
        kfree(rxq_info->rspq_id);
        kfree(rxq_info->uldrxq);
        kfree(rxq_info);
}

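/* Request one MSI-X interrupt per ULD rx queue and set its CPU affinity;
 * on failure, release the vectors acquired so far.
 */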
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        struct msix_info *minfo;
        unsigned int idx;
        int err = 0;

        for_each_uldrxq(rxq_info, idx) {
                minfo = rxq_info->uldrxq[idx].msix;
                err = request_irq(minfo->vec,
                                  t4_sge_intr_msix, 0,
                                  minfo->desc,
                                  &rxq_info->uldrxq[idx].rspq);
                if (err)
                        goto unwind;

                cxgb4_set_msix_aff(adap, minfo->vec,
                                   &minfo->aff_mask, idx);
        }
        return 0;

unwind:
        while (idx-- > 0) {
                minfo = rxq_info->uldrxq[idx].msix;
                cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
                cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
                free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
        }
        return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        struct msix_info *minfo;
        unsigned int idx;

        for_each_uldrxq(rxq_info, idx) {
                minfo = rxq_info->uldrxq[idx].msix;
                cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
                cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
                free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
        }
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int idx;

        for_each_uldrxq(rxq_info, idx) {
                struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

                if (!q)
                        continue;

                cxgb4_enable_rx(adap, q);
        }
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int idx;

        for_each_uldrxq(rxq_info, idx) {
                struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

                if (!q)
                        continue;

                cxgb4_quiesce_rx(q);
        }
}

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
        int nq = txq_info->ntxq;
        int i;

        for (i = 0; i < nq; i++) {
                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                if (txq && txq->q.desc) {
                        tasklet_kill(&txq->qresume_tsk);
                        t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
                                        txq->q.cntxt_id);
                        free_tx_desc(adap, &txq->q, txq->q.in_use, false);
                        kfree(txq->q.sdesc);
                        __skb_queue_purge(&txq->sendq);
                        free_txq(adap, &txq->q);
                }
        }
}

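/* Allocate the ULD tx queues, spreading them evenly across ports; any
 * failure frees whatever was already set up.
 */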
static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
                  unsigned int uld_type)
{
        struct sge *s = &adap->sge;
        int nq = txq_info->ntxq;
        int i, j, err;

        j = nq / adap->params.nports;
        for (i = 0; i < nq; i++) {
                struct sge_uld_txq *txq = &txq_info->uldtxq[i];

                txq->q.size = 1024;
                err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
                                           s->fw_evtq.cntxt_id, uld_type);
                if (err)
                        goto freeout;
        }
        return 0;
freeout:
        free_sge_txq_uld(adap, txq_info);
        return err;
}

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
        struct sge_uld_txq_info *txq_info = NULL;
        int tx_uld_type = TX_ULD(uld_type);

        txq_info = adap->sge.uld_txq_info[tx_uld_type];

        if (txq_info && atomic_dec_and_test(&txq_info->users)) {
                free_sge_txq_uld(adap, txq_info);
                kfree(txq_info->uldtxq);
                kfree(txq_info);
                adap->sge.uld_txq_info[tx_uld_type] = NULL;
        }
}

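/* Set up (or reuse) the tx queue set for a ULD.  Offload tx queues of the
 * same TX_ULD() type are shared and reference counted; the crypto ULD gets
 * its own sizing based on the available crypto FC resources.
 */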
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
                  const struct cxgb4_uld_info *uld_info)
{
        struct sge_uld_txq_info *txq_info = NULL;
        int tx_uld_type, i;

        tx_uld_type = TX_ULD(uld_type);
        txq_info = adap->sge.uld_txq_info[tx_uld_type];

        if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
            (atomic_inc_return(&txq_info->users) > 1))
                return 0;

        txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
        if (!txq_info)
                return -ENOMEM;
        if (uld_type == CXGB4_ULD_CRYPTO) {
                i = min_t(int, adap->vres.ncrypto_fc,
                          num_online_cpus());
                txq_info->ntxq = rounddown(i, adap->params.nports);
                if (txq_info->ntxq <= 0) {
                        dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
                        kfree(txq_info);
                        return -EINVAL;
                }

        } else {
                i = min_t(int, uld_info->ntxq, num_online_cpus());
                txq_info->ntxq = roundup(i, adap->params.nports);
        }
        txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
                                   GFP_KERNEL);
        if (!txq_info->uldtxq) {
                kfree(txq_info);
                return -ENOMEM;
        }

        if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
                kfree(txq_info->uldtxq);
                kfree(txq_info);
                return -ENOMEM;
        }

        atomic_inc(&txq_info->users);
        adap->sge.uld_txq_info[tx_uld_type] = txq_info;
        return 0;
}

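/* Publish the ULD's rx/ciq/tx queue ids and counts in the lower-layer
 * driver info handed to the ULD at attach time.
 */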
static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
                           struct cxgb4_lld_info *lli)
{
        struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
        int tx_uld_type = TX_ULD(uld_type);
        struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

        lli->rxq_ids = rxq_info->rspq_id;
        lli->nrxq = rxq_info->nrxq;
        lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
        lli->nciq = rxq_info->nciq;
        lli->ntxq = txq_info->ntxq;
}

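/* Allocate the adapter-wide ULD state: the per-type ULD info array and the
 * arrays of rx/tx queue info pointers.
 */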
int t4_uld_mem_alloc(struct adapter *adap)
{
        struct sge *s = &adap->sge;

        adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
        if (!adap->uld)
                return -ENOMEM;

        s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
                                  sizeof(struct sge_uld_rxq_info *),
                                  GFP_KERNEL);
        if (!s->uld_rxq_info)
                goto err_uld;

        s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
                                  sizeof(struct sge_uld_txq_info *),
                                  GFP_KERNEL);
        if (!s->uld_txq_info)
                goto err_uld_rx;
        return 0;

err_uld_rx:
        kfree(s->uld_rxq_info);
err_uld:
        kfree(adap->uld);
        return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
        struct sge *s = &adap->sge;

        kfree(s->uld_txq_info);
        kfree(s->uld_rxq_info);
        kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
        if (adap->uld[type].handle) {
                adap->uld[type].handle = NULL;
                adap->uld[type].add = NULL;
                release_sge_txq_uld(adap, type);

                if (adap->flags & CXGB4_FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);

                if (adap->flags & CXGB4_USING_MSIX)
                        free_msix_queue_irqs_uld(adap, type);

                free_sge_queues_uld(adap, type);
                free_queues_uld(adap, type);
        }
}

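/* Detach and tear down every ULD that is still attached to the adapter. */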
void t4_uld_clean_up(struct adapter *adap)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        for (i = 0; i < CXGB4_ULD_MAX; i++) {
                if (!adap->uld[i].handle)
                        continue;

                cxgb4_shutdown_uld_adapter(adap, i);
        }
        mutex_unlock(&uld_mutex);
}

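/* Fill in the adapter-wide fields of the lower-layer driver info passed to a
 * ULD's add() callback: hardware resources, SGE parameters, firmware
 * capabilities and doorbell/GTS register addresses.
 */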
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
        int i;

        lld->pdev = adap->pdev;
        lld->pf = adap->pf;
        lld->l2t = adap->l2t;
        lld->tids = &adap->tids;
        lld->ports = adap->port;
        lld->vr = &adap->vres;
        lld->mtus = adap->params.mtus;
        lld->nchan = adap->params.nports;
        lld->nports = adap->params.nports;
        lld->wr_cred = adap->params.ofldq_wr_cred;
        lld->crypto = adap->params.crypto;
        lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
        lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
        lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
        lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
        lld->iscsi_ppm = &adap->iscsi_ppm;
        lld->adapter_type = adap->params.chip;
        lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
        lld->udb_density = 1 << adap->params.sge.eq_qpp;
        lld->ucq_density = 1 << adap->params.sge.iq_qpp;
        lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
        lld->filt_mode = adap->params.tp.vlan_pri_map;
        /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
        for (i = 0; i < NCHAN; i++)
                lld->tx_modq[i] = i;
        lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
        lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
        lld->fw_vers = adap->params.fw_vers;
        lld->dbfifo_int_thresh = dbfifo_int_thresh;
        lld->sge_ingpadboundary = adap->sge.fl_align;
        lld->sge_egrstatuspagesize = adap->sge.stat_len;
        lld->sge_pktshift = adap->sge.pktshift;
        lld->ulp_crypto = adap->params.crypto;
        lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
        lld->max_ordird_qp = adap->params.max_ordird_qp;
        lld->max_ird_adapter = adap->params.max_ird_adapter;
        lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
        lld->nodeid = dev_to_node(adap->pdev_dev);
        lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
        lld->write_w_imm_support = adap->params.write_w_imm_support;
        lld->write_cmpl_support = adap->params.write_cmpl_support;
}

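/* Call the ULD's add() callback with the lower-layer driver info, record the
 * returned handle, and report CXGB4_STATE_UP if the adapter is already fully
 * initialized.
 */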
static int uld_attach(struct adapter *adap, unsigned int uld)
{
        struct cxgb4_lld_info lli;
        void *handle;

        uld_init(adap, &lli);
        uld_queue_init(adap, uld, &lli);

        handle = adap->uld[uld].add(&lli);
        if (IS_ERR(handle)) {
                dev_warn(adap->pdev_dev,
                         "could not attach to the %s driver, error %ld\n",
                         adap->uld[uld].name, PTR_ERR(handle));
                return PTR_ERR(handle);
        }

        adap->uld[uld].handle = handle;
        t4_register_netevent_notifier();

        if (adap->flags & CXGB4_FULL_INIT_DONE)
                adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

        return 0;
}

#ifdef CONFIG_CHELSIO_TLS_DEVICE
/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
 * @adap: adapter info
 * @enable: 1 to enable / 0 to disable ktls settings.
 */
static void cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
{
        u32 params = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_TX_HW) |
                      FW_PARAMS_PARAM_Y_V(enable));
        int ret = 0;

        ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &params, &params);
        /* if fw returns failure, clear the ktls flag */
        if (ret)
                adap->params.crypto &= ~ULP_CRYPTO_KTLS_INLINE;
}
#endif

/* cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.
 */
void cxgb4_register_uld(enum cxgb4_uld type,
                        const struct cxgb4_uld_info *p)
{
        struct adapter *adap;
        int ret = 0;

        if (type >= CXGB4_ULD_MAX)
                return;

        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node) {
                if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
                    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                        continue;
                if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                        continue;
                ret = cfg_queues_uld(adap, type, p);
                if (ret)
                        goto out;
                ret = setup_sge_queues_uld(adap, type, p->lro);
                if (ret)
                        goto free_queues;
                if (adap->flags & CXGB4_USING_MSIX) {
                        ret = request_msix_queue_irqs_uld(adap, type);
                        if (ret)
                                goto free_rxq;
                }
                if (adap->flags & CXGB4_FULL_INIT_DONE)
                        enable_rx_uld(adap, type);
#ifdef CONFIG_CHELSIO_TLS_DEVICE
                /* send mbox to enable ktls related settings. */
                if (type == CXGB4_ULD_CRYPTO &&
                    (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
                        cxgb4_set_ktls_feature(adap, 1);
#endif
                if (adap->uld[type].add)
                        goto free_irq;
                ret = setup_sge_txq_uld(adap, type, p);
                if (ret)
                        goto free_irq;
                adap->uld[type] = *p;
                ret = uld_attach(adap, type);
                if (ret)
                        goto free_txq;
                continue;
free_txq:
                release_sge_txq_uld(adap, type);
free_irq:
                if (adap->flags & CXGB4_FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);
                if (adap->flags & CXGB4_USING_MSIX)
                        free_msix_queue_irqs_uld(adap, type);
free_rxq:
                free_sge_queues_uld(adap, type);
free_queues:
                free_queues_uld(adap, type);
out:
                dev_warn(adap->pdev_dev,
                         "ULD registration failed for uld type %d\n", type);
        }
        mutex_unlock(&uld_mutex);
        return;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 *      cxgb4_unregister_uld - unregister an upper-layer driver
 *      @type: the ULD type
 *
 *      Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return -EINVAL;

        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node) {
                if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
                    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
                        continue;
                if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
                        continue;

                cxgb4_shutdown_uld_adapter(adap, type);

#ifdef CONFIG_CHELSIO_TLS_DEVICE
                /* send mbox to disable ktls related settings. */
                if (type == CXGB4_ULD_CRYPTO &&
                    (adap->params.crypto & FW_CAPS_CONFIG_TX_TLS_HW))
                        cxgb4_set_ktls_feature(adap, 0);
#endif
        }
        mutex_unlock(&uld_mutex);

        return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);