1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Ethernet driver
4 * Copyright (C) 2021 Marvell.
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/inetdevice.h>
11 #include <linux/rhashtable.h>
12 #include <linux/bitfield.h>
13 #include <net/flow_dissector.h>
14 #include <net/pkt_cls.h>
15 #include <net/tc_act/tc_gact.h>
16 #include <net/tc_act/tc_mirred.h>
17 #include <net/tc_act/tc_vlan.h>
21 #include "otx2_common.h"
23 /* Egress rate limiting definitions */
24 #define MAX_BURST_EXPONENT 0x0FULL
25 #define MAX_BURST_MANTISSA 0xFFULL
/* MAX_BURST_SIZE = ((256 + MAX_BURST_MANTISSA) << (1 + MAX_BURST_EXPONENT)) / 256 bytes */
26 #define MAX_BURST_SIZE 130816ULL
27 #define MAX_RATE_DIVIDER_EXPONENT 12ULL
28 #define MAX_RATE_EXPONENT 0x0FULL
29 #define MAX_RATE_MANTISSA 0xFFULL
31 /* Bitfields in NIX_TLX_PIR register */
32 #define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
33 #define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
34 #define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13)
35 #define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
36 #define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
/* Per-flow HW stats snapshot; only delta vs. last read is reported to tc.
 * NOTE(review): interior fields of this struct are missing from this listing.
 */
38 struct otx2_tc_flow_stats {
/* NOTE(review): the lines below appear to belong to struct otx2_tc_flow,
 * whose opening line is not visible here — confirm against full source.
 */
45 struct rhash_head node;
49 struct otx2_tc_flow_stats stats;
50 spinlock_t lock; /* lock for stats */
/* Allocate (or re-allocate after a max_flows change) the bitmap that tracks
 * which of the flow_cfg->max_flows TC MCAM slots are in use.
 * NOTE(review): return statements are missing from this listing — presumably
 * 0 on success / -ENOMEM on allocation failure; confirm against full source.
 */
57 int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
59 struct otx2_tc_info *tc = &nic->tc_info;
61 if (!nic->flow_cfg->max_flows)
64 /* Max flows changed, free the existing bitmap */
65 kfree(tc->tc_entries_bitmap)
67 tc->tc_entries_bitmap =
68 kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
69 sizeof(long), GFP_KERNEL);
70 if (!tc->tc_entries_bitmap) {
71 netdev_err(nic->netdev,
72 "Unable to alloc TC flow entries bitmap\n");
78 EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
/* Convert a burst size in bytes into the exponent/mantissa encoding used by
 * the NIX_TLX_PIR register (see formula in the comment below).  Bursts above
 * MAX_BURST_SIZE are clamped to the hardware maximum encoding.
 */
80 static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
85 /* Burst is calculated as
86 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
87 * Max supported burst size is 130,816 bytes.
89 burst = min_t(u32, burst, MAX_BURST_SIZE);
/* exponent is floor(log2(burst)) - 1; mantissa encodes the remainder
 * above the nearest lower power of two.
 */
91 *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
92 tmp = burst - rounddown_pow_of_two(burst);
93 if (burst < MAX_BURST_MANTISSA)
94 *burst_mantissa = tmp * 2;
96 *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
/* NOTE(review): the branch selecting these max values (burst > MAX_BURST_SIZE
 * path) is missing from this listing — confirm structure against full source.
 */
98 *burst_exp = MAX_BURST_EXPONENT;
99 *burst_mantissa = MAX_BURST_MANTISSA;
/* Convert a rate in Mbps into exponent/mantissa/divider-exponent for the
 * NIX_TLX_PIR register.  div_exp is kept 0 for the supported 2Mbps-100Gbps
 * range; out-of-range rates fall back to the maximum encoding rather than
 * disabling rate limiting.
 */
103 static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
104 u32 *mantissa, u32 *div_exp)
108 /* Rate calculation by hardware
110 * PIR_ADD = ((256 + mantissa) << exp) / 256
111 * rate = (2 * PIR_ADD) / ( 1 << div_exp)
112 * The resultant rate is in Mbps.
115 /* 2Mbps to 100Gbps can be expressed with div_exp = 0.
116 * Setting this to '0' will ease the calculation of
117 * exponent and mantissa.
122 *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
123 tmp = maxrate - rounddown_pow_of_two(maxrate);
124 if (maxrate < MAX_RATE_MANTISSA)
127 *mantissa = tmp / (1ULL << (*exp - 7));
129 /* Instead of disabling rate limiting, set all values to max */
130 *exp = MAX_RATE_EXPONENT;
131 *mantissa = MAX_RATE_MANTISSA;
/* Program the TL4 scheduler PIR register with the encoded burst/rate via an
 * AF mailbox message.  burst is in bytes, maxrate in Mbps; burst==0 and
 * maxrate==0 is used by the delete path to clear the limiter.
 */
135 static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
137 struct otx2_hw *hw = &nic->hw;
138 struct nix_txschq_config *req;
139 u32 burst_exp, burst_mantissa;
140 u32 exp, mantissa, div_exp;
143 /* All SQs share the same TL4, so pick the first scheduler */
144 txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
146 /* Get exponent and mantissa values from the desired rate */
147 otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
148 otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
150 mutex_lock(&nic->mbox.lock);
151 req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
/* alloc failure path: drop the lock before returning */
153 mutex_unlock(&nic->mbox.lock);
157 req->lvl = NIX_TXSCH_LVL_TL4;
/* BIT_ULL(0) enables the rate limiter in the PIR register */
159 req->reg[0] = NIX_AF_TL4X_PIR(txschq);
160 req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
161 FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
162 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
163 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
164 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
166 err = otx2_sync_mbox_msg(&nic->mbox);
167 mutex_unlock(&nic->mbox.lock);
/* Common pre-checks for MATCHALL install: interface must be up and the rule
 * must carry exactly one action.  Error returns (presumably -EINVAL /
 * -EOPNOTSUPP) are missing from this listing.
 */
171 static int otx2_tc_validate_flow(struct otx2_nic *nic,
172 struct flow_action *actions,
173 struct netlink_ext_ack *extack)
175 if (nic->flags & OTX2_FLAG_INTF_DOWN) {
176 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
180 if (!flow_action_has_entries(actions)) {
181 NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
185 if (!flow_offload_has_one_action(actions)) {
186 NL_SET_ERR_MSG_MOD(extack,
187 "Egress MATCHALL offload supports only 1 policing action");
193 static int otx2_policer_validate(const struct flow_action *action,
194 const struct flow_action_entry *act,
195 struct netlink_ext_ack *extack)
197 if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
198 NL_SET_ERR_MSG_MOD(extack,
199 "Offload not supported when exceed action is not drop");
203 if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
204 act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
205 NL_SET_ERR_MSG_MOD(extack,
206 "Offload not supported when conform action is not pipe or ok");
210 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
211 !flow_action_is_last_entry(action, act)) {
212 NL_SET_ERR_MSG_MOD(extack,
213 "Offload not supported when conform action is ok, but action is not last");
217 if (act->police.peakrate_bytes_ps ||
218 act->police.avrate || act->police.overhead) {
219 NL_SET_ERR_MSG_MOD(extack,
220 "Offload not supported when peakrate/avrate/overhead is configured");
/* Install a single egress MATCHALL rate limiter: validate the lone police
 * action, convert bytes/s to Mbps, and program the TL4 PIR register.
 * Only one egress limiter may be active at a time.
 */
227 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
228 struct tc_cls_matchall_offload *cls)
230 struct netlink_ext_ack *extack = cls->common.extack;
231 struct flow_action *actions = &cls->rule->action;
232 struct flow_action_entry *entry;
236 err = otx2_tc_validate_flow(nic, actions, extack);
240 if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
241 NL_SET_ERR_MSG_MOD(extack,
242 "Only one Egress MATCHALL ratelimiter can be offloaded");
246 entry = &cls->rule->action.entries[0];
248 case FLOW_ACTION_POLICE:
249 err = otx2_policer_validate(&cls->rule->action, entry, extack);
/* packet-per-second policing is not supported on the egress path */
253 if (entry->police.rate_pkt_ps) {
254 NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
257 /* Convert bytes per second to Mbps */
/* NOTE(review): rate's declaration is not visible; if it is u32 the
 * rate_bytes_ps * 8 product can overflow for rates above ~4Gbps —
 * upstream later widened this math to u64; confirm against full source.
 */
258 rate = entry->police.rate_bytes_ps * 8;
259 rate = max_t(u32, rate / 1000000, 1);
260 err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
263 nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
266 NL_SET_ERR_MSG_MOD(extack,
267 "Only police action is supported with Egress MATCHALL offload");
/* Remove the egress MATCHALL limiter by programming a zero burst/rate and
 * clearing the enabled flag.
 */
274 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
275 struct tc_cls_matchall_offload *cls)
277 struct netlink_ext_ack *extack = cls->common.extack;
280 if (nic->flags & OTX2_FLAG_INTF_DOWN) {
281 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
285 err = otx2_set_matchall_egress_rate(nic, 0, 0);
286 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
/* Attach an ingress policer (CN10K bandwidth profile) to a flow: pick a free
 * RQ, allocate a leaf profile, program its rate, and map the RQ to it.  On
 * the error path the leaf profile is freed again.  The flow's NPC action is
 * set to unicast to the chosen RQ, with the tc mark as match_id.
 */
290 static int otx2_tc_act_set_police(struct otx2_nic *nic,
291 struct otx2_tc_flow *node,
292 struct flow_cls_offload *f,
293 u64 rate, u32 burst, u32 mark,
294 struct npc_install_flow_req *req, bool pps)
296 struct netlink_ext_ack *extack = f->common.extack;
297 struct otx2_hw *hw = &nic->hw;
/* one policed flow per RQ: claim the first free receive queue */
300 rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
301 if (rq_idx >= hw->rx_queues) {
302 NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
306 mutex_lock(&nic->mbox.lock);
308 rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
310 mutex_unlock(&nic->mbox.lock);
314 rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
318 rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
322 mutex_unlock(&nic->mbox.lock);
324 req->match_id = mark & 0xFFFFULL;
326 req->op = NIX_RX_ACTIONOP_UCAST;
327 set_bit(rq_idx, &nic->rq_bmap);
328 node->is_act_police = true;
/* error path: release the leaf profile allocated above */
334 if (cn10k_free_leaf_profile(nic, node->leaf_profile))
335 netdev_err(nic->netdev,
336 "Unable to free leaf bandwidth profile(%d)\n",
338 mutex_unlock(&nic->mbox.lock);
/* Translate the tc flower action list into the NPC install-flow request:
 * drop/accept map to NPC actionops, redirect-ingress to a same-PF VF,
 * vlan-pop to RX vtag stripping, and police/mark feed into
 * otx2_tc_act_set_police().  Police and mark are only valid as a single
 * (combined) action pair.
 */
342 static int otx2_tc_parse_actions(struct otx2_nic *nic,
343 struct flow_action *flow_action,
344 struct npc_install_flow_req *req,
345 struct flow_cls_offload *f,
346 struct otx2_tc_flow *node)
348 struct netlink_ext_ack *extack = f->common.extack;
349 struct flow_action_entry *act;
350 struct net_device *target;
351 struct otx2_nic *priv;
359 if (!flow_action_has_entries(flow_action)) {
360 NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
364 flow_action_for_each(i, act, flow_action) {
366 case FLOW_ACTION_DROP:
367 req->op = NIX_RX_ACTIONOP_DROP;
369 case FLOW_ACTION_ACCEPT:
370 req->op = NIX_RX_ACTION_DEFAULT;
372 case FLOW_ACTION_REDIRECT_INGRESS:
374 priv = netdev_priv(target);
375 /* npc_install_flow_req doesn't support passing a target pcifunc */
376 if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
377 NL_SET_ERR_MSG_MOD(extack,
378 "can't redirect to other pf/vf");
381 req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
382 req->op = NIX_RX_ACTION_DEFAULT;
384 case FLOW_ACTION_VLAN_POP:
385 req->vtag0_valid = true;
386 /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
387 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
389 case FLOW_ACTION_POLICE:
390 /* Ingress ratelimiting is not supported on OcteonTx2 */
391 if (is_dev_otx2(nic->pdev)) {
392 NL_SET_ERR_MSG_MOD(extack,
393 "Ingress policing not supported on this platform");
397 err = otx2_policer_validate(flow_action, act, extack);
401 if (act->police.rate_bytes_ps > 0) {
402 rate = act->police.rate_bytes_ps * 8;
403 burst = act->police.burst;
404 } else if (act->police.rate_pkt_ps > 0) {
405 /* The algorithm used to calculate rate
406 * mantissa, exponent values for a given token
407 * rate (token can be byte or packet) requires
408 * token rate to be mutiplied by 8.
410 rate = act->police.rate_pkt_ps * 8;
411 burst = act->police.burst_pkt;
416 case FLOW_ACTION_MARK:
425 NL_SET_ERR_MSG_MOD(extack,
426 "rate limit police offload requires a single action");
431 return otx2_tc_act_set_police(nic, node, f, rate, burst,
/* Translate the flower match keys (basic/eth/ip/vlan/ipv4/ipv6/ports) into
 * the NPC packet/mask pair inside @req, setting the matching NPC feature bit
 * for each populated field, then hand the action list to
 * otx2_tc_parse_actions().  Unsupported dissector keys are rejected up front.
 */
437 static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
438 struct flow_cls_offload *f,
439 struct npc_install_flow_req *req)
441 struct netlink_ext_ack *extack = f->common.extack;
442 struct flow_msg *flow_spec = &req->packet;
443 struct flow_msg *flow_mask = &req->mask;
444 struct flow_dissector *dissector;
445 struct flow_rule *rule;
448 rule = flow_cls_offload_flow_rule(f);
449 dissector = rule->match.dissector;
/* reject rules using any key outside the supported set */
451 if ((dissector->used_keys &
452 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
453 BIT(FLOW_DISSECTOR_KEY_BASIC) |
454 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
455 BIT(FLOW_DISSECTOR_KEY_VLAN) |
456 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
457 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
458 BIT(FLOW_DISSECTOR_KEY_PORTS) |
459 BIT(FLOW_DISSECTOR_KEY_IP)))) {
460 netdev_info(nic->netdev, "unsupported flow used key 0x%x",
461 dissector->used_keys);
465 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
466 struct flow_match_basic match;
468 flow_rule_match_basic(rule, &match);
470 /* All EtherTypes can be matched, no hw limitation */
471 flow_spec->etype = match.key->n_proto;
472 flow_mask->etype = match.mask->n_proto;
473 req->features |= BIT_ULL(NPC_ETYPE);
/* only TCP/UDP/SCTP/ICMP/ICMPv6 protocol matches are offloadable */
475 if (match.mask->ip_proto &&
476 (match.key->ip_proto != IPPROTO_TCP &&
477 match.key->ip_proto != IPPROTO_UDP &&
478 match.key->ip_proto != IPPROTO_SCTP &&
479 match.key->ip_proto != IPPROTO_ICMP &&
480 match.key->ip_proto != IPPROTO_ICMPV6)) {
481 netdev_info(nic->netdev,
482 "ip_proto=0x%x not supported\n",
483 match.key->ip_proto);
486 if (match.mask->ip_proto)
487 ip_proto = match.key->ip_proto;
489 if (ip_proto == IPPROTO_UDP)
490 req->features |= BIT_ULL(NPC_IPPROTO_UDP);
491 else if (ip_proto == IPPROTO_TCP)
492 req->features |= BIT_ULL(NPC_IPPROTO_TCP);
493 else if (ip_proto == IPPROTO_SCTP)
494 req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
495 else if (ip_proto == IPPROTO_ICMP)
496 req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
497 else if (ip_proto == IPPROTO_ICMPV6)
498 req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
/* only destination MAC match is supported, not source MAC */
501 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
502 struct flow_match_eth_addrs match;
504 flow_rule_match_eth_addrs(rule, &match);
505 if (!is_zero_ether_addr(match.mask->src)) {
506 NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
510 if (!is_zero_ether_addr(match.mask->dst)) {
511 ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
512 ether_addr_copy(flow_mask->dmac,
513 (u8 *)&match.mask->dst);
514 req->features |= BIT_ULL(NPC_DMAC);
/* TOS match is supported; TTL match is not */
518 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
519 struct flow_match_ip match;
521 flow_rule_match_ip(rule, &match);
522 if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
524 NL_SET_ERR_MSG_MOD(extack, "tos not supported");
527 if (match.mask->ttl) {
528 NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
531 flow_spec->tos = match.key->tos;
532 flow_mask->tos = match.mask->tos;
533 req->features |= BIT_ULL(NPC_TOS);
/* outer VLAN: only 802.1Q TPID; TCI rebuilt from id/dei/priority */
536 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
537 struct flow_match_vlan match;
538 u16 vlan_tci, vlan_tci_mask;
540 flow_rule_match_vlan(rule, &match);
542 if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
543 netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
544 ntohs(match.key->vlan_tpid));
548 if (match.mask->vlan_id ||
549 match.mask->vlan_dei ||
550 match.mask->vlan_priority) {
551 vlan_tci = match.key->vlan_id |
552 match.key->vlan_dei << 12 |
553 match.key->vlan_priority << 13;
555 vlan_tci_mask = match.mask->vlan_id |
556 match.mask->vlan_dei << 12 |
557 match.mask->vlan_priority << 13;
559 flow_spec->vlan_tci = htons(vlan_tci);
560 flow_mask->vlan_tci = htons(vlan_tci_mask);
561 req->features |= BIT_ULL(NPC_OUTER_VID);
565 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
566 struct flow_match_ipv4_addrs match;
568 flow_rule_match_ipv4_addrs(rule, &match);
570 flow_spec->ip4dst = match.key->dst;
571 flow_mask->ip4dst = match.mask->dst;
572 req->features |= BIT_ULL(NPC_DIP_IPV4);
574 flow_spec->ip4src = match.key->src;
575 flow_mask->ip4src = match.mask->src;
576 req->features |= BIT_ULL(NPC_SIP_IPV4);
577 } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
578 struct flow_match_ipv6_addrs match;
580 flow_rule_match_ipv6_addrs(rule, &match);
/* IPv6 loopback addresses cannot be offloaded */
582 if (ipv6_addr_loopback(&match.key->dst) ||
583 ipv6_addr_loopback(&match.key->src)) {
584 NL_SET_ERR_MSG_MOD(extack,
585 "Flow matching IPv6 loopback addr not supported");
589 if (!ipv6_addr_any(&match.mask->dst)) {
590 memcpy(&flow_spec->ip6dst,
591 (struct in6_addr *)&match.key->dst,
592 sizeof(flow_spec->ip6dst));
593 memcpy(&flow_mask->ip6dst,
594 (struct in6_addr *)&match.mask->dst,
595 sizeof(flow_spec->ip6dst));
596 req->features |= BIT_ULL(NPC_DIP_IPV6);
599 if (!ipv6_addr_any(&match.mask->src)) {
600 memcpy(&flow_spec->ip6src,
601 (struct in6_addr *)&match.key->src,
602 sizeof(flow_spec->ip6src));
603 memcpy(&flow_mask->ip6src,
604 (struct in6_addr *)&match.mask->src,
605 sizeof(flow_spec->ip6src));
606 req->features |= BIT_ULL(NPC_SIP_IPV6);
/* L4 ports: feature bit depends on the ip_proto captured above */
610 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
611 struct flow_match_ports match;
613 flow_rule_match_ports(rule, &match);
615 flow_spec->dport = match.key->dst;
616 flow_mask->dport = match.mask->dst;
617 if (ip_proto == IPPROTO_UDP)
618 req->features |= BIT_ULL(NPC_DPORT_UDP);
619 else if (ip_proto == IPPROTO_TCP)
620 req->features |= BIT_ULL(NPC_DPORT_TCP);
621 else if (ip_proto == IPPROTO_SCTP)
622 req->features |= BIT_ULL(NPC_DPORT_SCTP);
624 flow_spec->sport = match.key->src;
625 flow_mask->sport = match.mask->src;
626 if (ip_proto == IPPROTO_UDP)
627 req->features |= BIT_ULL(NPC_SPORT_UDP);
628 else if (ip_proto == IPPROTO_TCP)
629 req->features |= BIT_ULL(NPC_SPORT_TCP);
630 else if (ip_proto == IPPROTO_SCTP)
631 req->features |= BIT_ULL(NPC_SPORT_SCTP);
634 return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
/* Ask the AF over the mailbox to delete a single NPC MCAM entry. */
637 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
639 struct npc_delete_flow_req *req;
642 mutex_lock(&nic->mbox.lock);
643 req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
/* alloc failure path: drop the lock before returning */
645 mutex_unlock(&nic->mbox.lock);
651 /* Send message to AF */
652 err = otx2_sync_mbox_msg(&nic->mbox);
654 netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
656 mutex_unlock(&nic->mbox.lock);
659 mutex_unlock(&nic->mbox.lock);
664 static int otx2_tc_del_flow(struct otx2_nic *nic,
665 struct flow_cls_offload *tc_flow_cmd)
667 struct otx2_flow_config *flow_cfg = nic->flow_cfg;
668 struct otx2_tc_info *tc_info = &nic->tc_info;
669 struct otx2_tc_flow *flow_node;
672 flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
673 &tc_flow_cmd->cookie,
674 tc_info->flow_ht_params);
676 netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
677 tc_flow_cmd->cookie);
681 if (flow_node->is_act_police) {
682 mutex_lock(&nic->mbox.lock);
684 err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
685 flow_node->leaf_profile, false);
687 netdev_err(nic->netdev,
688 "Unmapping RQ %d & profile %d failed\n",
689 flow_node->rq, flow_node->leaf_profile);
691 err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
693 netdev_err(nic->netdev,
694 "Unable to free leaf bandwidth profile(%d)\n",
695 flow_node->leaf_profile);
697 __clear_bit(flow_node->rq, &nic->rq_bmap);
699 mutex_unlock(&nic->mbox.lock);
702 otx2_del_mcam_flow_entry(nic, flow_node->entry);
704 WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
706 nic->tc_info.flow_ht_params));
707 kfree_rcu(flow_node, rcu);
709 clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
710 flow_cfg->nr_flows--;
/* Install a tc flower flow: parse matches/actions into a stack-local dummy
 * request, replace any existing flow with the same cookie, copy the dummy
 * into the real mailbox request, send it to the AF, then insert the node
 * into the hash table and claim a bitmap slot.  The trailing free_leaf
 * section unwinds a policer attached by otx2_tc_parse_actions() on failure.
 */
715 static int otx2_tc_add_flow(struct otx2_nic *nic,
716 struct flow_cls_offload *tc_flow_cmd)
718 struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
719 struct otx2_flow_config *flow_cfg = nic->flow_cfg;
720 struct otx2_tc_info *tc_info = &nic->tc_info;
721 struct otx2_tc_flow *new_node, *old_node;
722 struct npc_install_flow_req *req, dummy;
725 if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
728 if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
729 NL_SET_ERR_MSG_MOD(extack,
730 "Free MCAM entry not available to add the flow");
734 /* allocate memory for the new flow and it's node */
735 new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
738 spin_lock_init(&new_node->lock);
739 new_node->cookie = tc_flow_cmd->cookie;
/* parse into a stack copy first: the mbox request is allocated later */
741 memset(&dummy, 0, sizeof(struct npc_install_flow_req));
743 rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
745 kfree_rcu(new_node, rcu);
749 /* If a flow exists with the same cookie, delete it */
750 old_node = rhashtable_lookup_fast(&tc_info->flow_table,
751 &tc_flow_cmd->cookie,
752 tc_info->flow_ht_params);
754 otx2_tc_del_flow(nic, tc_flow_cmd);
756 mutex_lock(&nic->mbox.lock);
757 req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
759 mutex_unlock(&nic->mbox.lock);
/* preserve the mbox header while overwriting the body with the dummy */
764 memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
765 memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
767 new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
768 flow_cfg->max_flows);
769 req->channel = nic->hw.rx_chan_base;
/* MCAM entries are handed out from the high end of flow_ent[] */
770 req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
771 req->intf = NIX_INTF_RX;
773 new_node->entry = req->entry;
775 /* Send message to AF */
776 rc = otx2_sync_mbox_msg(&nic->mbox);
778 NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
779 mutex_unlock(&nic->mbox.lock);
780 kfree_rcu(new_node, rcu);
783 mutex_unlock(&nic->mbox.lock);
785 /* add new flow to flow-table */
786 rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
787 nic->tc_info.flow_ht_params);
/* NOTE(review): req points into mbox memory after unlock — confirm it is
 * still valid here against the full source.
 */
789 otx2_del_mcam_flow_entry(nic, req->entry);
790 kfree_rcu(new_node, rcu);
794 set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
795 flow_cfg->nr_flows++;
/* free_leaf: undo the policer set up during action parsing */
800 if (new_node->is_act_police) {
801 mutex_lock(&nic->mbox.lock);
803 err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
804 new_node->leaf_profile, false);
806 netdev_err(nic->netdev,
807 "Unmapping RQ %d & profile %d failed\n",
808 new_node->rq, new_node->leaf_profile);
809 err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
811 netdev_err(nic->netdev,
812 "Unable to free leaf bandwidth profile(%d)\n",
813 new_node->leaf_profile);
815 __clear_bit(new_node->rq, &nic->rq_bmap);
817 mutex_unlock(&nic->mbox.lock);
/* Fetch the MCAM hit counter for a flow from the AF and report the delta
 * since the last read to tc via flow_stats_update(); the absolute count is
 * cached in the node under its stats spinlock.
 */
823 static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
824 struct flow_cls_offload *tc_flow_cmd)
826 struct otx2_tc_info *tc_info = &nic->tc_info;
827 struct npc_mcam_get_stats_req *req;
828 struct npc_mcam_get_stats_rsp *rsp;
829 struct otx2_tc_flow_stats *stats;
830 struct otx2_tc_flow *flow_node;
833 flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
834 &tc_flow_cmd->cookie,
835 tc_info->flow_ht_params);
837 netdev_info(nic->netdev, "tc flow not found for cookie %lx",
838 tc_flow_cmd->cookie);
842 mutex_lock(&nic->mbox.lock);
844 req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
846 mutex_unlock(&nic->mbox.lock);
850 req->entry = flow_node->entry;
852 err = otx2_sync_mbox_msg(&nic->mbox);
854 netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
856 mutex_unlock(&nic->mbox.lock);
860 rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
861 (&nic->mbox.mbox, 0, &req->hdr);
863 mutex_unlock(&nic->mbox.lock);
867 mutex_unlock(&nic->mbox.lock);
872 stats = &flow_node->stats;
/* report only packets seen since the previous stats read */
874 spin_lock(&flow_node->lock);
875 flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
876 FLOW_ACTION_HW_STATS_IMMEDIATE);
877 stats->pkts = rsp->stat;
878 spin_unlock(&flow_node->lock);
883 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
884 struct flow_cls_offload *cls_flower)
886 switch (cls_flower->command) {
887 case FLOW_CLS_REPLACE:
888 return otx2_tc_add_flow(nic, cls_flower);
889 case FLOW_CLS_DESTROY:
890 return otx2_tc_del_flow(nic, cls_flower);
892 return otx2_tc_get_flow_stats(nic, cls_flower);
/* Install a single ingress MATCHALL rate limiter on CN10K: allocate the
 * matchall bandwidth profile and program it with the policed rate in bits
 * per second.  Rejected on OcteonTx2 silicon.
 */
898 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
899 struct tc_cls_matchall_offload *cls)
901 struct netlink_ext_ack *extack = cls->common.extack;
902 struct flow_action *actions = &cls->rule->action;
903 struct flow_action_entry *entry;
907 err = otx2_tc_validate_flow(nic, actions, extack);
911 if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
912 NL_SET_ERR_MSG_MOD(extack,
913 "Only one ingress MATCHALL ratelimitter can be offloaded");
917 entry = &cls->rule->action.entries[0];
919 case FLOW_ACTION_POLICE:
920 /* Ingress ratelimiting is not supported on OcteonTx2 */
921 if (is_dev_otx2(nic->pdev)) {
922 NL_SET_ERR_MSG_MOD(extack,
923 "Ingress policing not supported on this platform");
927 err = cn10k_alloc_matchall_ipolicer(nic);
931 /* Convert to bits per second */
932 rate = entry->police.rate_bytes_ps * 8;
933 err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
936 nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
939 NL_SET_ERR_MSG_MOD(extack,
940 "Only police action supported with Ingress MATCHALL offload");
/* Remove the ingress MATCHALL limiter: free the matchall bandwidth profile
 * and clear the enabled flag.
 */
947 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
948 struct tc_cls_matchall_offload *cls)
950 struct netlink_ext_ack *extack = cls->common.extack;
953 if (nic->flags & OTX2_FLAG_INTF_DOWN) {
954 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
958 err = cn10k_free_matchall_ipolicer(nic);
959 nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
963 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
964 struct tc_cls_matchall_offload *cls_matchall)
966 switch (cls_matchall->command) {
967 case TC_CLSMATCHALL_REPLACE:
968 return otx2_tc_ingress_matchall_install(nic, cls_matchall);
969 case TC_CLSMATCHALL_DESTROY:
970 return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
971 case TC_CLSMATCHALL_STATS:
979 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
980 void *type_data, void *cb_priv)
982 struct otx2_nic *nic = cb_priv;
984 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
988 case TC_SETUP_CLSFLOWER:
989 return otx2_setup_tc_cls_flower(nic, type_data);
990 case TC_SETUP_CLSMATCHALL:
991 return otx2_setup_tc_ingress_matchall(nic, type_data);
999 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
1000 struct tc_cls_matchall_offload *cls_matchall)
1002 switch (cls_matchall->command) {
1003 case TC_CLSMATCHALL_REPLACE:
1004 return otx2_tc_egress_matchall_install(nic, cls_matchall);
1005 case TC_CLSMATCHALL_DESTROY:
1006 return otx2_tc_egress_matchall_delete(nic, cls_matchall);
1007 case TC_CLSMATCHALL_STATS:
1015 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
1016 void *type_data, void *cb_priv)
1018 struct otx2_nic *nic = cb_priv;
1020 if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
1024 case TC_SETUP_CLSMATCHALL:
1025 return otx2_setup_tc_egress_matchall(nic, type_data);
/* Driver-wide block-callback list passed to flow_block_cb_setup_simple() */
1033 static LIST_HEAD(otx2_block_cb_list);
1035 static int otx2_setup_tc_block(struct net_device *netdev,
1036 struct flow_block_offload *f)
1038 struct otx2_nic *nic = netdev_priv(netdev);
1039 flow_setup_cb_t *cb;
1042 if (f->block_shared)
1045 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
1046 cb = otx2_setup_tc_block_ingress_cb;
1048 } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
1049 cb = otx2_setup_tc_block_egress_cb;
1055 return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
1059 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
1063 case TC_SETUP_BLOCK:
1064 return otx2_setup_tc_block(netdev, type_data);
1069 EXPORT_SYMBOL(otx2_setup_tc);
/* rhashtable keyed by the tc cookie embedded in struct otx2_tc_flow */
1071 static const struct rhashtable_params tc_flow_ht_params = {
1072 .head_offset = offsetof(struct otx2_tc_flow, node),
1073 .key_offset = offsetof(struct otx2_tc_flow, cookie),
1074 .key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
1075 .automatic_shrinking = true,
/* Initialize TC offload state: reserve RQ0 (never used for policing),
 * allocate the entry bitmap, and set up the cookie-keyed flow hashtable.
 * Requires nic->flow_cfg to have been set up first.
 */
1078 int otx2_init_tc(struct otx2_nic *nic)
1080 struct otx2_tc_info *tc = &nic->tc_info;
1083 /* Exclude receive queue 0 being used for police action */
1084 set_bit(0, &nic->rq_bmap);
1086 if (!nic->flow_cfg) {
1087 netdev_err(nic->netdev,
1088 "Can't init TC, nic->flow_cfg is not setup\n");
1092 err = otx2_tc_alloc_ent_bitmap(nic);
1096 tc->flow_ht_params = tc_flow_ht_params;
1097 return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
1099 EXPORT_SYMBOL(otx2_init_tc);
1101 void otx2_shutdown_tc(struct otx2_nic *nic)
1103 struct otx2_tc_info *tc = &nic->tc_info;
1105 kfree(tc->tc_entries_bitmap);
1106 rhashtable_destroy(&tc->flow_table);
1108 EXPORT_SYMBOL(otx2_shutdown_tc);