// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac TC Handling (HW only)
 */

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"
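
/* Build the catch-all Flexible RX Parser entry: with match_en == 0 it
 * matches every frame, and af (Accept Frame) passes it to the host. It is
 * installed as the table's last entry so frames that hit no other filter
 * are still received.
 */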
static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
{
	memset(entry, 0, sizeof(*entry));
	entry->in_use = true;
	entry->is_last = true;
	entry->is_frag = false;
	entry->prio = ~0x0;
	entry->handle = 0;
	entry->val.match_data = 0x0;
	entry->val.match_en = 0x0;
	entry->val.af = 1;
	entry->val.dma_ch_no = 0x0;
}
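
/* Look up a parser entry. With free == false, return the entry already
 * bound to this u32 handle (fragment halves are skipped); with free ==
 * true, claim the first unused slot for the handle and reset its HW
 * values.
 */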
static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
					     struct tc_cls_u32_offload *cls,
					     bool free)
{
	struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
	u32 loc = cls->knode.handle;
	int i;

	for (i = 0; i < priv->tc_entries_max; i++) {
		entry = &priv->tc_entries[i];
		if (!entry->in_use && !first && free)
			first = entry;
		if ((entry->handle == loc) && !free && !entry->is_frag)
			dup = entry;
	}

	if (dup)
		return dup;
	if (first) {
		first->handle = loc;
		first->in_use = true;

		/* Reset HW values */
		memset(&first->val, 0, sizeof(first->val));
	}

	return first;
}
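
/* Map the node's gact actions onto parser result bits: "ok" sets Accept
 * Frame (af), "drop" sets Reject Frame (rf). For a split match the action
 * is applied to the fragment entry, so it only fires once both halves
 * have matched.
 */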
static int tc_fill_actions(struct stmmac_tc_entry *entry,
			   struct stmmac_tc_entry *frag,
			   struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *action_entry = entry;
	const struct tc_action *act;
	struct tcf_exts *exts;
	int i;

	exts = cls->knode.exts;
	if (!tcf_exts_has_actions(exts))
		return -EINVAL;
	if (frag)
		action_entry = frag;

	tcf_exts_for_each_action(i, act, exts) {
		/* Accept */
		if (is_tcf_gact_ok(act)) {
			action_entry->val.af = 1;
			break;
		}
		/* Drop */
		if (is_tcf_gact_shot(act)) {
			action_entry->val.rf = 1;
			break;
		}

		/* Unsupported */
		return -EINVAL;
	}

	return 0;
}
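
/* Turn the single u32 key into one or two 32-bit parser entries. The
 * parser compares 32-bit words at 4-byte granularity, so an unaligned key
 * is split: the low bytes shift up into the word at real_off and the
 * remainder spills into a fragment entry matching the following word.
 */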
static int tc_fill_entry(struct stmmac_priv *priv,
			 struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry, *frag = NULL;
	struct tc_u32_sel *sel = cls->knode.sel;
	u32 off, data, mask, real_off, rem;
	u32 prio = cls->common.prio << 16;
	int ret;

	/* Only 1 match per entry */
	if (sel->nkeys <= 0 || sel->nkeys > 1)
		return -EINVAL;

	off = sel->keys[0].off << sel->offshift;
	data = sel->keys[0].val;
	mask = sel->keys[0].mask;

	switch (ntohs(cls->common.protocol)) {
	case ETH_P_ALL:
		break;
	case ETH_P_IP:
		off += ETH_HLEN;
		break;
	default:
		return -EINVAL;
	}

	if (off > priv->tc_off_max)
		return -EINVAL;

	real_off = off / 4;
	rem = off % 4;

	entry = tc_find_entry(priv, cls, true);
	if (!entry)
		return -EINVAL;

	if (rem) {
		frag = tc_find_entry(priv, cls, true);
		if (!frag) {
			ret = -EINVAL;
			goto err_unuse;
		}

		entry->frag_ptr = frag;
		entry->val.match_en = (mask << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.match_data = (data << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.frame_offset = real_off;
		entry->prio = prio;

		frag->val.match_en = (mask >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.match_data = (data >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.frame_offset = real_off + 1;
		frag->prio = prio;
		frag->is_frag = true;
	} else {
		entry->frag_ptr = NULL;
		entry->val.match_en = mask;
		entry->val.match_data = data;
		entry->val.frame_offset = real_off;
		entry->prio = prio;
	}

	ret = tc_fill_actions(entry, frag, cls);
	if (ret)
		goto err_unuse;

	return 0;

err_unuse:
	if (frag)
		frag->in_use = false;
	entry->in_use = false;
	return ret;
}

static void tc_unfill_entry(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry;

	entry = tc_find_entry(priv, cls, false);
	if (!entry)
		return;

	entry->in_use = false;
	if (entry->frag_ptr) {
		entry = entry->frag_ptr;
		entry->is_frag = false;
		entry->in_use = false;
	}
}

static int tc_config_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	int ret;

	ret = tc_fill_entry(priv, cls);
	if (ret)
		return ret;

	ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				priv->tc_entries_max);
	if (ret)
		goto err_unfill;

	return 0;

err_unfill:
	tc_unfill_entry(priv, cls);
	return ret;
}

static int tc_delete_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	/* Set entry and fragments as not used */
	tc_unfill_entry(priv, cls);

	/* Reprogram the whole table so the freed slots take effect */
	return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
				 priv->tc_entries_max);
}
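
/* Entry point for cls_u32 offload. An illustrative rule that would land
 * here, matching the first byte of the IPv4 header and dropping:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip u32 \
 *	match u8 0x45 0xff at 0 action drop
 */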
static int tc_setup_cls_u32(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	switch (cls->command) {
	case TC_CLSU32_REPLACE_KNODE:
		tc_unfill_entry(priv, cls);
		fallthrough;
	case TC_CLSU32_NEW_KNODE:
		return tc_config_knode(priv, cls);
	case TC_CLSU32_DELETE_KNODE:
		return tc_delete_knode(priv, cls);
	default:
		return -EOPNOTSUPP;
	}
}

static int tc_init(struct stmmac_priv *priv)
{
	struct dma_features *dma_cap = &priv->dma_cap;
	unsigned int count;
	int i;

	if (dma_cap->l3l4fnum) {
		priv->flow_entries_max = dma_cap->l3l4fnum;
		priv->flow_entries = devm_kcalloc(priv->device,
						  dma_cap->l3l4fnum,
						  sizeof(*priv->flow_entries),
						  GFP_KERNEL);
		if (!priv->flow_entries)
			return -ENOMEM;

		for (i = 0; i < priv->flow_entries_max; i++)
			priv->flow_entries[i].idx = i;

		dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
			 priv->flow_entries_max);
	}

	/* Fail silently as we can still use remaining features, e.g. CBS */
	if (!dma_cap->frpsel)
		return 0;

	/* Decode the HW-reported parser buffer size and entry count */
	switch (dma_cap->frpbs) {
	case 0x0: priv->tc_off_max = 64; break;
	case 0x1: priv->tc_off_max = 128; break;
	case 0x2: priv->tc_off_max = 256; break;
	default: return -EINVAL;
	}

	switch (dma_cap->frpes) {
	case 0x0: count = 64; break;
	case 0x1: count = 128; break;
	case 0x2: count = 256; break;
	default: return -EINVAL;
	}

	/* Reserve one last filter which lets all pass */
	priv->tc_entries_max = count;
	priv->tc_entries = devm_kcalloc(priv->device,
			count, sizeof(*priv->tc_entries), GFP_KERNEL);
	if (!priv->tc_entries)
		return -ENOMEM;

	tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);

	dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
		 priv->tc_entries_max, priv->tc_off_max);
	return 0;
}
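
/* Offload a Credit Based Shaper (IEEE 802.1Qav) onto one TX queue: the
 * queue is flipped from DCB to AVB mode on enable (and back on disable),
 * then the four CBS parameters are scaled into the hardware's fixed-point
 * credit units.
 */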
static int tc_setup_cbs(struct stmmac_priv *priv,
			struct tc_cbs_qopt_offload *qopt)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue = qopt->queue;
	u32 ptr, speed_div;
	u32 mode_to_use;
	u64 value;
	int ret;

	/* Queue 0 is not AVB capable */
	if (queue <= 0 || queue >= tx_queues_count)
		return -EINVAL;
	if (!priv->dma_cap.av)
		return -EOPNOTSUPP;

	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
	} else if (!qopt->enable) {
		return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
	}

	/* Port Transmit Rate and Speed Divider */
	ptr = (priv->speed == SPEED_100) ? 4 : 8;
	speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;

	/* Final adjustments for HW */
	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);

	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);

	value = qopt->hicredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);

	value = qopt->locredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);

	ret = stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	if (ret)
		return ret;

	dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
		 queue, qopt->sendslope, qopt->idleslope,
		 qopt->hicredit, qopt->locredit);
	return 0;
}
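
/* Illustrative command for tc_setup_cbs() above; the values are an example
 * only (reserve ~20 Mbit/s for queue 1 on a 100 Mbit/s link, mqprio as
 * parent qdisc 100:):
 *
 *   tc qdisc replace dev eth0 parent 100:2 cbs \
 *	idleslope 20000 sendslope -80000 hicredit 30 locredit -1470 \
 *	offload 1
 *
 * idleslope/sendslope are in kbit/s and hicredit/locredit in bytes, hence
 * the 1024 * ptr / speed_div and 1024 * 8 scaling above.
 */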

static int tc_parse_flow_actions(struct stmmac_priv *priv,
				 struct flow_action *action,
				 struct stmmac_flow_entry *entry,
				 struct netlink_ext_ack *extack)
{
	struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(action))
		return -EINVAL;
	if (!flow_action_basic_hw_stats_check(action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			entry->action |= STMMAC_FLOW_ACTION_DROP;
			return 0;
		default:
			break;
		}
	}

	/* Nothing to do, maybe inverse filter? */
	return 0;
}
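
/* The flower parsers below program the MAC's L3/L4 filter block, tried in
 * order: basic (ip_proto), IPv4 source/destination address, and TCP/UDP
 * source/destination port matches.
 */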
static int tc_add_basic_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic match;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
		return -EINVAL;

	flow_rule_match_basic(rule, &match);
	entry->ip_proto = match.key->ip_proto;
	return 0;
}

static int tc_add_ip4_flow(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls,
			   struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ipv4_addrs match;
	u32 hw_match;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
		return -EINVAL;

	flow_rule_match_ipv4_addrs(rule, &match);

	hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, false, inv, hw_match);
		if (ret)
			return ret;
	}

	return 0;
}

static int tc_add_ports_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ports match;
	u32 hw_match;
	bool is_udp;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
		return -EINVAL;

	switch (entry->ip_proto) {
	case IPPROTO_TCP: is_udp = false; break;
	case IPPROTO_UDP: is_udp = true; break;
	default: return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);

	hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, false, inv, hw_match);
		if (ret)
			return ret;
	}

	entry->is_l4 = true;
	return 0;
}
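
/* Find the flow entry bound to this flower cookie or, when get_free is
 * set, the first unused slot for a new rule.
 */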
static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
					      struct flow_cls_offload *cls,
					      bool get_free)
{
	int i;

	for (i = 0; i < priv->flow_entries_max; i++) {
		struct stmmac_flow_entry *entry = &priv->flow_entries[i];

		if (entry->cookie == cls->cookie)
			return entry;
		if (get_free && !entry->in_use)
			return entry;
	}

	return NULL;
}

static struct {
	int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
		  struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
	{ .fn = tc_add_basic_flow },
	{ .fn = tc_add_ip4_flow },
	{ .fn = tc_add_ports_flow },
};

static int tc_add_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	int i, ret;

	if (!entry) {
		entry = tc_find_flow(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	ret = tc_parse_flow_actions(priv, &rule->action, entry,
				    cls->common.extack);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
		ret = tc_flow_parsers[i].fn(priv, cls, entry);
		if (!ret)
			entry->in_use = true;
	}

	if (!entry->in_use)
		return -EINVAL;

	entry->cookie = cls->cookie;
	return 0;
}

static int tc_del_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	int ret;

	if (!entry || !entry->in_use)
		return -ENOENT;

	if (entry->is_l4) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	} else {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	}

	entry->in_use = false;
	entry->cookie = 0;
	entry->is_l4 = false;
	return ret;
}

static int tc_setup_cls(struct stmmac_priv *priv,
			struct flow_cls_offload *cls)
{
	int ret = 0;

	/* When RSS is enabled, the filtering will be bypassed */
	if (priv->rss.enable)
		return -EBUSY;

	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		ret = tc_add_flow(priv, cls);
		break;
	case FLOW_CLS_DESTROY:
		ret = tc_del_flow(priv, cls);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}
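
/* Illustrative flower rule handled by tc_setup_cls() above (drop TCP
 * traffic to port 80, offloaded to the L3/L4 filters):
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 ingress protocol ip flower \
 *	ip_proto tcp dst_port 80 skip_sw action drop
 */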

static int tc_setup_taprio(struct stmmac_priv *priv,
			   struct tc_taprio_qopt_offload *qopt)
{
	u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
	struct plat_stmmacenet_data *plat = priv->plat;
	struct timespec64 time;
	bool fpe = false;
	int i, ret = 0;
	u64 ctr;

	if (!priv->dma_cap.estsel)
		return -EOPNOTSUPP;

	/* Decode the HW-reported GCL time-interval width and list depth */
	switch (wid) {
	case 0x1: wid = 16; break;
	case 0x2: wid = 20; break;
	case 0x3: wid = 24; break;
	default: return -EOPNOTSUPP;
	}

	switch (dep) {
	case 0x1: dep = 64; break;
	case 0x2: dep = 128; break;
	case 0x3: dep = 256; break;
	case 0x4: dep = 512; break;
	case 0x5: dep = 1024; break;
	default: return -EOPNOTSUPP;
	}

	if (!qopt->enable)
		goto disable;
	if (qopt->num_entries >= dep)
		return -EINVAL;
	if (!qopt->base_time)
		return -ERANGE;
	if (!qopt->cycle_time)
		return -ERANGE;

	if (!plat->est) {
		plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
					 GFP_KERNEL);
		if (!plat->est)
			return -ENOMEM;
	} else {
		memset(plat->est, 0, sizeof(*plat->est));
	}

	size = qopt->num_entries;

	priv->plat->est->gcl_size = size;
	priv->plat->est->enable = qopt->enable;

	for (i = 0; i < size; i++) {
		s64 delta_ns = qopt->entries[i].interval;
		u32 gates = qopt->entries[i].gate_mask;

		if (delta_ns > GENMASK(wid, 0))
			return -ERANGE;
		if (gates > GENMASK(31 - wid, 0))
			return -ERANGE;

		/* When FPE is used, bit 0 of the gate mask encodes the
		 * hold (set) / release (clear) state.
		 */
		switch (qopt->entries[i].command) {
		case TC_TAPRIO_CMD_SET_GATES:
			if (fpe)
				return -EINVAL;
			break;
		case TC_TAPRIO_CMD_SET_AND_HOLD:
			gates |= BIT(0);
			fpe = true;
			break;
		case TC_TAPRIO_CMD_SET_AND_RELEASE:
			gates &= ~BIT(0);
			fpe = true;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* One GCL word: interval in the low wid bits, gates above */
		priv->plat->est->gcl[i] = delta_ns | (gates << wid);
	}

	/* Adjust for real system time */
	time = ktime_to_timespec64(qopt->base_time);
	priv->plat->est->btr[0] = (u32)time.tv_nsec;
	priv->plat->est->btr[1] = (u32)time.tv_sec;

	ctr = qopt->cycle_time;
	priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
	priv->plat->est->ctr[1] = (u32)ctr;

	if (fpe && !priv->dma_cap.fpesel)
		return -EOPNOTSUPP;

	ret = stmmac_fpe_configure(priv, priv->ioaddr,
				   priv->plat->tx_queues_to_use,
				   priv->plat->rx_queues_to_use, fpe);
	if (ret && fpe) {
		netdev_err(priv->dev, "failed to enable Frame Preemption\n");
		return ret;
	}

	ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
				   priv->plat->clk_ptp_rate);
	if (ret) {
		netdev_err(priv->dev, "failed to configure EST\n");
		goto disable;
	}

	netdev_info(priv->dev, "configured EST\n");
	return 0;

disable:
	/* Guard against disabling before the first enable allocated est */
	if (priv->plat->est) {
		priv->plat->est->enable = false;
		stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
				     priv->plat->clk_ptp_rate);
	}
	return ret;
}
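
/* Illustrative schedule handled by tc_setup_taprio() above: two entries,
 * 1 ms cycle, full offload (flags 0x2):
 *
 *   tc qdisc replace dev eth0 parent root taprio num_tc 4 \
 *	map 0 1 2 3 queues 1@0 1@1 1@2 1@3 \
 *	base-time 1000000000 cycle-time 1000000 \
 *	sched-entry S 0x1 250000 sched-entry S 0xe 750000 \
 *	flags 0x2
 */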

static int tc_setup_etf(struct stmmac_priv *priv,
			struct tc_etf_qopt_offload *qopt)
{
	if (!priv->dma_cap.tbssel)
		return -EOPNOTSUPP;
	if (qopt->queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;
	if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
		return -EINVAL;

	if (qopt->enable)
		priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
	else
		priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;

	netdev_info(priv->dev, "%s ETF for Queue %d\n",
		    qopt->enable ? "enabled" : "disabled", qopt->queue);
	return 0;
}

const struct stmmac_tc_ops dwmac510_tc_ops = {
	.init = tc_init,
	.setup_cls_u32 = tc_setup_cls_u32,
	.setup_cbs = tc_setup_cbs,
	.setup_cls = tc_setup_cls,
	.setup_taprio = tc_setup_taprio,
	.setup_etf = tc_setup_etf,
};