// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"
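
/* This file implements mqprio qdisc hardware offload over ETHOFLD
 * queues. An illustrative (not prescriptive) tc invocation that would
 * reach this offload path, assuming an iproute2 build with bw_rlimit
 * support; interface name and rates are chosen purely for the example:
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 \
 *      mode channel shaper bw_rlimit \
 *      min_rate 1Gbit 2Gbit max_rate 4Gbit 5Gbit
 */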

static int cxgb4_mqprio_validate(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	u64 min_rate = 0, max_rate = 0, max_link_rate;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u32 speed, qcount = 0, qoffset = 0;
	u32 start_a, start_b, end_a, end_b;
	int ret;
	u8 i, j;

	if (!mqprio->qopt.num_tc)
		return 0;

	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
		netdev_err(dev, "Only full TC hardware offload is supported\n");
		return -EINVAL;
	} else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
		netdev_err(dev, "Only channel mode offload is supported\n");
		return -EINVAL;
	} else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
		netdev_err(dev, "Only bandwidth rate shaper supported\n");
		return -EINVAL;
	} else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
		netdev_err(dev,
			   "Only %u traffic classes supported by hardware\n",
			   adap->params.nsched_cls);
		return -ERANGE;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
		qcount += mqprio->qopt.count[i];

		start_a = mqprio->qopt.offset[i];
		end_a = start_a + mqprio->qopt.count[i] - 1;
		for (j = i + 1; j < mqprio->qopt.num_tc; j++) {
			start_b = mqprio->qopt.offset[j];
			end_b = start_b + mqprio->qopt.count[j] - 1;

			/* If queue count is 0, then the traffic
			 * belonging to this class will not use
			 * ETHOFLD queues. So, no need to validate
			 * further.
			 */
			if (!mqprio->qopt.count[i])
				break;

			if (!mqprio->qopt.count[j])
				continue;
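
			/* Two queue ranges [start_a, end_a] and
			 * [start_b, end_b] intersect exactly when
			 * max(start_a, start_b) <= min(end_a, end_b).
			 * For example, 4@0 and 4@2 give [0, 3] and
			 * [2, 5]: max(0, 2) = 2 <= min(3, 5) = 3, so
			 * they overlap and must be rejected.
			 */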
			if (max_t(u32, start_a, start_b) <=
			    min_t(u32, end_a, end_b)) {
				netdev_err(dev,
					   "Queues can't overlap across tc\n");
				return -EINVAL;
			}
		}

		/* Convert bytes per second to bits per second */
		min_rate += (mqprio->min_rate[i] * 8);
		max_rate += (mqprio->max_rate[i] * 8);
	}

	if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
		return -ENOMEM;

	if (min_rate > max_link_rate || max_rate > max_link_rate) {
		netdev_err(dev,
			   "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
			   min_rate, max_rate, max_link_rate);
		return -EINVAL;
	}

	return 0;
}
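
/* An EOSW (ETHOFLD software) Tx queue is a per-EOTID ring of software
 * descriptors staged in front of a hardware ETHOFLD Tx queue. The
 * qresume tasklet restarts transmission from this ring once work
 * request credits are replenished (see cxgb4_ethofld_restart).
 */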
static int cxgb4_init_eosw_txq(struct net_device *dev,
			       struct sge_eosw_txq *eosw_txq,
			       u32 eotid, u32 hwqid)
{
	struct adapter *adap = netdev2adap(dev);
	struct tx_sw_desc *ring;

	memset(eosw_txq, 0, sizeof(*eosw_txq));

	ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM,
		       sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	eosw_txq->desc = ring;
	eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
	spin_lock_init(&eosw_txq->lock);
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
	eosw_txq->eotid = eotid;
	eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->hwqid = hwqid;
	eosw_txq->netdev = dev;
	tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
		     (unsigned long)eosw_txq);
	return 0;
}

static void cxgb4_clean_eosw_txq(struct net_device *dev,
				 struct sge_eosw_txq *eosw_txq)
{
	struct adapter *adap = netdev2adap(dev);

	cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);
	eosw_txq->pidx = 0;
	eosw_txq->last_pidx = 0;
	eosw_txq->cidx = 0;
	eosw_txq->last_cidx = 0;
	eosw_txq->flowc_idx = 0;
	eosw_txq->inuse = 0;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->ncompl = 0;
	eosw_txq->last_compl = 0;
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
}

static void cxgb4_free_eosw_txq(struct net_device *dev,
				struct sge_eosw_txq *eosw_txq)
{
	spin_lock_bh(&eosw_txq->lock);
	cxgb4_clean_eosw_txq(dev, eosw_txq);
	kfree(eosw_txq->desc);
	spin_unlock_bh(&eosw_txq->lock);
	tasklet_kill(&eosw_txq->qresume_tsk);
}

static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	int ret, msix = 0;
	u32 i;

	/* Allocate ETHOFLD hardware queue structures if not done already */
	if (!refcount_read(&adap->tc_mqprio->refcnt)) {
		adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_ofld_rxq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_rxq)
			return -ENOMEM;

		adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_eohw_txq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_txq) {
			kfree(adap->sge.eohw_rxq);
			return -ENOMEM;
		}

		refcount_set(&adap->tc_mqprio->refcnt, 1);
	} else {
		refcount_inc(&adap->tc_mqprio->refcnt);
	}
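
	/* When MSI-X is unavailable, the interrupt queue's absolute ID is
	 * encoded as a negative index (offset by one so that zero stays
	 * unambiguous), which the Rx queue allocator below uses to set up
	 * forwarded, rather than direct, interrupts.
	 */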
	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)adap->sge.intrq.abs_id + 1);

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Allocate Rxqs for receiving ETHOFLD Tx completions */
		if (msix >= 0) {
			msix = cxgb4_get_msix_idx_from_bmap(adap);
			if (msix < 0) {
				ret = msix;
				goto out_free_queues;
			}

			eorxq->msix = &adap->msix_info[msix];
			snprintf(eorxq->msix->desc,
				 sizeof(eorxq->msix->desc),
				 "%s-eorxq%d", dev->name, i);
		}

		init_rspq(adap, &eorxq->rspq,
			  CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
			  CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);

		eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;

		ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
				       dev, msix, &eorxq->fl,
				       cxgb4_ethofld_rx_handler,
				       NULL, 0);
		if (ret)
			goto out_free_queues;

		/* Allocate ETHOFLD hardware Txqs */
		eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
		ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
					       eorxq->rspq.cntxt_id);
		if (ret)
			goto out_free_queues;

		/* Allocate IRQs, set IRQ affinity, and start Rx */
		if (adap->flags & CXGB4_USING_MSIX) {
			ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
					  eorxq->msix->desc, &eorxq->rspq);
			if (ret)
				goto out_free_msix;

			cxgb4_set_msix_aff(adap, eorxq->msix->vec,
					   &eorxq->msix->aff_mask, i);
		}

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_enable_rx(adap, &eorxq->rspq);
	}

	return 0;

out_free_msix:
	while (i-- > 0) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
		}
	}

out_free_queues:
	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		if (eorxq->rspq.desc)
			free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		if (eorxq->msix)
			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
	return ret;
}

static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	u32 i;

	/* Return if no ETHOFLD structures have been allocated yet */
	if (!refcount_read(&adap->tc_mqprio->refcnt))
		return;

	/* Return if no hardware queues have been allocated */
	if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
		return;

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Device removal path will already disable NAPI
		 * before unregistering netdevice. So, only disable
		 * NAPI if we're not in device removal path.
		 */
		if (!(adap->flags & CXGB4_SHUTTING_DOWN))
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		}

		free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	/* Free up ETHOFLD structures if there are no users */
	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
}
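
/* Reserve one firmware scheduler class per traffic class and program it
 * with the requested rate limits. The stack passes rates in bytes per
 * second, while the scheduler is programmed in Kbps, hence the
 * "* 8 / 1000" conversion below.
 */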
static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CL_RL,
		.u.params.mode = SCHED_CLASS_MODE_FLOW,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sched_class *e;
	int ret;
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	p.u.params.channel = pi->tx_chan;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		/* Convert from bytes per second to Kbps */
		p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
		p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);

		/* Request larger burst buffer for smaller MTU, so
		 * that hardware can work on more data per burst
		 * cycle.
		 */
		if (dev->mtu <= ETH_DATA_LEN)
			p.u.params.burstsize = 8 * dev->mtu;

		e = cxgb4_sched_class_alloc(dev, &p);
		if (!e) {
			ret = -ENOMEM;
			goto out_err;
		}

		tc_port_mqprio->tc_hwtc_map[i] = e->idx;
	}

	return 0;

out_err:
	while (i--)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);

	return ret;
}

static void cxgb4_mqprio_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
}
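
/* Bind an EOSW Tx queue's EOTID to a hardware scheduler class by
 * sending a FLOWC work request, then wait for the firmware completion
 * so the binding is known to be in effect before any traffic is sent.
 */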
static int cxgb4_mqprio_class_bind(struct net_device *dev,
				   struct sge_eosw_txq *eosw_txq,
				   u8 tc)
{
	struct ch_sched_flowc fe;
	int ret;

	init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;

	ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&eosw_txq->completion,
					  CXGB4_FLOWC_WAIT_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return 0;
}

static void cxgb4_mqprio_class_unbind(struct net_device *dev,
				      struct sge_eosw_txq *eosw_txq,
				      u8 tc)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_sched_flowc fe;

	/* If we're shutting down, interrupts are disabled and no completions
	 * come back. So, skip waiting for completions in this scenario.
	 */
	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;
	cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);

	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		wait_for_completion_timeout(&eosw_txq->completion,
					    CXGB4_FLOWC_WAIT_TIMEOUT);
}
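
/* Bring the offload live: allocate the hardware queues, assign an EOTID
 * and an EOSW Tx queue to every offloaded queue, bind each EOTID to its
 * traffic class, and finally publish the tc-to-queue mapping to the
 * networking stack.
 */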
static int cxgb4_mqprio_enable_offload(struct net_device *dev,
				       struct tc_mqprio_qopt_offload *mqprio)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	u32 qoffset, qcount, tot_qcount, qid, hwqid;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	int eotid, ret;
	u16 i, j;
	u8 hwtc;

	ret = cxgb4_mqprio_alloc_hw_resources(dev);
	if (ret)
		return -ENOMEM;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eotid = cxgb4_get_free_eotid(&adap->tids);
			if (eotid < 0) {
				ret = -ENOMEM;
				goto out_free_eotids;
			}
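
			/* Spread EOTIDs across the port's hardware
			 * queue sets so ETHOFLD traffic is balanced
			 * over the available hardware Tx queues.
			 */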
			qid = qoffset + j;
			hwqid = pi->first_qset + (eotid % pi->nqsets);
			eosw_txq = &tc_port_mqprio->eosw_txq[qid];
			ret = cxgb4_init_eosw_txq(dev, eosw_txq,
						  eotid, hwqid);
			if (ret)
				goto out_free_eotids;

			cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
			if (ret)
				goto out_free_eotids;
		}
	}

	memcpy(&tc_port_mqprio->mqprio, mqprio,
	       sizeof(struct tc_mqprio_qopt_offload));

	/* Inform the stack about the configured tc params.
	 *
	 * Set the correct queue map. If no queue count has been
	 * specified, then send the traffic through default NIC
	 * queues, instead of ETHOFLD queues.
	 */
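	/* Illustrative example (values assumed): with pi->nqsets = 8 and
	 * two classes of 4 queues at offsets 0 and 4, TC0 maps to stack
	 * queues 8-11 and TC1 to queues 12-15, after the 8 default NIC
	 * queues; tot_qcount becomes 8 + 4 + 4 = 16.
	 */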
	ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
	if (ret)
		goto out_free_eotids;

	tot_qcount = pi->nqsets;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qcount = mqprio->qopt.count[i];
		if (qcount) {
			qoffset = mqprio->qopt.offset[i] + pi->nqsets;
		} else {
			qcount = pi->nqsets;
			qoffset = 0;
		}

		ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
		if (ret)
			goto out_reset_tc;

		tot_qcount += mqprio->qopt.count[i];
	}

	ret = netif_set_real_num_tx_queues(dev, tot_qcount);
	if (ret)
		goto out_reset_tc;

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
	return 0;

out_reset_tc:
	netdev_reset_tc(dev);
	i = mqprio->qopt.num_tc;

out_free_eotids:
	while (i-- > 0) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);
	return ret;
}

static void cxgb4_mqprio_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	u32 qoffset, qcount;
	u16 i, j;
	u8 hwtc;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
		return;

	netdev_reset_tc(dev);
	netif_set_real_num_tx_queues(dev, pi->nqsets);

	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
		qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
		qcount = tc_port_mqprio->mqprio.qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);

	/* Free up the traffic classes */
	cxgb4_mqprio_free_tc(dev);

	memset(&tc_port_mqprio->mqprio, 0,
	       sizeof(struct tc_mqprio_qopt_offload));

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
}
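
/* Entry point for TC_SETUP_QDISC_MQPRIO from the driver's ndo_setup_tc
 * handler. A request with num_tc == 0 tears the existing configuration
 * down; anything else replaces it.
 */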
int cxgb4_setup_tc_mqprio(struct net_device *dev,
			  struct tc_mqprio_qopt_offload *mqprio)
{
	struct adapter *adap = netdev2adap(dev);
	bool needs_bring_up = false;
	int ret;

	ret = cxgb4_mqprio_validate(dev, mqprio);
	if (ret)
		return ret;

	mutex_lock(&adap->tc_mqprio->mqprio_mutex);

	/* To configure tc params, the current allocated EOTIDs must
	 * be freed up. However, they can't be freed up if there's
	 * traffic running on the interface. So, ensure interface is
	 * down before configuring tc params.
	 */
	if (netif_running(dev)) {
		cxgb_close(dev);
		needs_bring_up = true;
	}

	cxgb4_mqprio_disable_offload(dev);

	/* If requested for clear, then just return since resources are
	 * already freed up by now.
	 */
	if (!mqprio->qopt.num_tc)
		goto out;

	/* Allocate free available traffic classes and configure
	 * their rate parameters.
	 */
	ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
	if (ret)
		goto out;

	ret = cxgb4_mqprio_enable_offload(dev, mqprio);
	if (ret) {
		cxgb4_mqprio_free_tc(dev);
		goto out;
	}

out:
	if (needs_bring_up)
		cxgb_open(dev);

	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
	return ret;
}

void cxgb4_mqprio_stop_offload(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct net_device *dev;
	u8 i;

	if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
		return;

	mutex_lock(&adap->tc_mqprio->mqprio_mutex);
	for_each_port(adap, i) {
		dev = adap->port[i];
		if (!dev)
			continue;

		tc_port_mqprio = &adap->tc_mqprio->port_mqprio[i];
		if (!tc_port_mqprio->mqprio.qopt.num_tc)
			continue;

		cxgb4_mqprio_disable_offload(dev);
	}
	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
}

int cxgb4_init_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
	struct cxgb4_tc_mqprio *tc_mqprio;
	struct sge_eosw_txq *eosw_txq;
	int ret = 0;
	u8 i;

	tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
	if (!tc_mqprio)
		return -ENOMEM;

	tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio),
				 GFP_KERNEL);
	if (!tc_port_mqprio) {
		ret = -ENOMEM;
		goto out_free_mqprio;
	}

	mutex_init(&tc_mqprio->mqprio_mutex);

	tc_mqprio->port_mqprio = tc_port_mqprio;
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
				   GFP_KERNEL);
		if (!eosw_txq) {
			ret = -ENOMEM;
			goto out_free_ports;
		}
		port_mqprio->eosw_txq = eosw_txq;
	}

	adap->tc_mqprio = tc_mqprio;
	refcount_set(&adap->tc_mqprio->refcnt, 0);
	return 0;

out_free_ports:
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		kfree(port_mqprio->eosw_txq);
	}
	kfree(tc_port_mqprio);

out_free_mqprio:
	kfree(tc_mqprio);
	return ret;
}

void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *port_mqprio;
	u8 i;

	if (adap->tc_mqprio) {
		mutex_lock(&adap->tc_mqprio->mqprio_mutex);
		if (adap->tc_mqprio->port_mqprio) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_mqprio_disable_offload(dev);
				port_mqprio = &adap->tc_mqprio->port_mqprio[i];
				kfree(port_mqprio->eosw_txq);
			}
			kfree(adap->tc_mqprio->port_mqprio);
		}
		mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
		kfree(adap->tc_mqprio);
	}
}