1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Physical Function ethernet driver
4 * Copyright (C) 2021 Marvell.
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/inetdevice.h>
9 #include <linux/rhashtable.h>
10 #include <linux/bitfield.h>
11 #include <net/flow_dissector.h>
12 #include <net/pkt_cls.h>
13 #include <net/tc_act/tc_gact.h>
14 #include <net/tc_act/tc_mirred.h>
15 #include <net/tc_act/tc_vlan.h>
18 #include "otx2_common.h"
20 /* Egress rate limiting definitions */
21 #define MAX_BURST_EXPONENT 0x0FULL
22 #define MAX_BURST_MANTISSA 0xFFULL
23 #define MAX_BURST_SIZE 130816ULL
24 #define MAX_RATE_DIVIDER_EXPONENT 12ULL
25 #define MAX_RATE_EXPONENT 0x0FULL
26 #define MAX_RATE_MANTISSA 0xFFULL
28 /* Bitfields in NIX_TLX_PIR register */
29 #define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
30 #define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
31 #define TLX_RATE_DIVIDER_EXPONENT GENMASK_ULL(16, 13)
32 #define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
33 #define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
/* Snapshot of the HW MCAM entry packet counter; used to compute the
 * delta handed to flow_stats_update() in otx2_tc_get_flow_stats().
 */
35 struct otx2_tc_flow_stats {
/* Per-offloaded-flow bookkeeping node, keyed by the tc cookie in the
 * driver's rhashtable (see tc_flow_ht_params).
 */
42 	struct rhash_head node;	/* rhashtable linkage */
47 	struct otx2_tc_flow_stats stats;	/* last counter snapshot */
48 	spinlock_t lock; /* lock for stats */
/* Encode a burst size (bytes) into the exponent/mantissa form consumed
 * by the NIX TLX_PIR burst fields. Output is written through
 * *burst_exp / *burst_mantissa; values beyond HW range saturate to max.
 */
51 static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
56 	/* Burst is calculated as
57 	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
58 	 * Max supported burst size is 130,816 bytes.
/* Clamp to the largest burst the hardware can express. */
60 	burst = min_t(u32, burst, MAX_BURST_SIZE);
/* Exponent is floor(log2(burst)) - 1; the remainder below the nearest
 * power of two becomes the mantissa contribution.
 */
62 	*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
63 	tmp = burst - rounddown_pow_of_two(burst);
64 	if (burst < MAX_BURST_MANTISSA)
65 	*burst_mantissa = tmp * 2;
/* Scale remainder down so it fits the 8-bit mantissa field. */
67 	*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
/* Saturate: request exceeded what the fields can encode. */
69 	*burst_exp = MAX_BURST_EXPONENT;
70 	*burst_mantissa = MAX_BURST_MANTISSA;
/* Encode a rate (Mbps) into exponent/mantissa/divider-exponent for the
 * NIX TLX_PIR rate fields; mirrors otx2_get_egress_burst_cfg() for the
 * rate half of the register.
 */
74 static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
75 				     u32 *mantissa, u32 *div_exp)
79 	/* Rate calculation by hardware
81 	 * PIR_ADD = ((256 + mantissa) << exp) / 256
82 	 * rate = (2 * PIR_ADD) / ( 1 << div_exp)
83 	 * The resultant rate is in Mbps.
86 	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
87 	 * Setting this to '0' will ease the calculation of
88 	 * exponent and mantissa.
/* Same exponent/remainder split as the burst encoding above. */
93 	*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
94 	tmp = maxrate - rounddown_pow_of_two(maxrate);
95 	if (maxrate < MAX_RATE_MANTISSA)
98 	*mantissa = tmp / (1ULL << (*exp - 7));
100 	/* Instead of disabling rate limiting, set all values to max */
101 	*exp = MAX_RATE_EXPONENT;
102 	*mantissa = MAX_RATE_MANTISSA;
/* Program the TL4 scheduler's PIR register with the requested egress
 * burst/rate via an AF mailbox NIX_TXSCHQ_CFG message.
 * Returns 0 on success or a negative errno from the mailbox exchange.
 */
106 static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
108 	struct otx2_hw *hw = &nic->hw;
109 	struct nix_txschq_config *req;
110 	u32 burst_exp, burst_mantissa;
111 	u32 exp, mantissa, div_exp;
114 	/* All SQs share the same TL4, so pick the first scheduler */
115 	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
117 	/* Get exponent and mantissa values from the desired rate */
118 	otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
119 	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
/* Mailbox allocation and send must happen under the mbox mutex. */
121 	mutex_lock(&nic->mbox.lock);
122 	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
/* Allocation failed: drop the lock before bailing out. */
124 	mutex_unlock(&nic->mbox.lock);
128 	req->lvl = NIX_TXSCH_LVL_TL4;
/* Pack all fields into the PIR register value; BIT_ULL(0) enables it. */
130 	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
131 	req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
132 			 FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
133 			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
134 			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
135 			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
137 	err = otx2_sync_mbox_msg(&nic->mbox);
138 	mutex_unlock(&nic->mbox.lock);
/* Sanity-check a matchall offload request: interface must be up and the
 * request must carry exactly one action. Errors are reported to
 * userspace through the netlink extack.
 */
142 static int otx2_tc_validate_flow(struct otx2_nic *nic,
143 				 struct flow_action *actions,
144 				 struct netlink_ext_ack *extack)
146 	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
147 		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
151 	if (!flow_action_has_entries(actions)) {
152 		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
/* HW supports a single policer per egress matchall rule. */
156 	if (!flow_offload_has_one_action(actions)) {
157 		NL_SET_ERR_MSG_MOD(extack,
158 				   "Egress MATCHALL offload supports only 1 policing action");
/* Install an egress matchall police action as a TL4 rate limit.
 * Only one such rule may be active at a time; the
 * OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED flag tracks that.
 */
164 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
165 					   struct tc_cls_matchall_offload *cls)
167 	struct netlink_ext_ack *extack = cls->common.extack;
168 	struct flow_action *actions = &cls->rule->action;
169 	struct flow_action_entry *entry;
173 	err = otx2_tc_validate_flow(nic, actions, extack);
177 	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
178 		NL_SET_ERR_MSG_MOD(extack,
179 				   "Only one Egress MATCHALL ratelimitter can be offloaded");
183 	entry = &cls->rule->action.entries[0];
185 	case FLOW_ACTION_POLICE:
/* HW rate limiter works on bytes/s only, not packets/s. */
186 		if (entry->police.rate_pkt_ps) {
187 			NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
190 		/* Convert bytes per second to Mbps */
/* NOTE(review): if 'rate' is declared u32 (declaration elided here),
 * rate_bytes_ps * 8 can overflow for rates above ~4 Gbps — verify the
 * declaration and widen/reorder the arithmetic if needed.
 */
191 		rate = entry->police.rate_bytes_ps * 8;
192 		rate = max_t(u32, rate / 1000000, 1);
193 		err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
196 		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
199 		NL_SET_ERR_MSG_MOD(extack,
200 				   "Only police action is supported with Egress MATCHALL offload");
/* Remove the egress matchall rate limit: program burst/rate of 0
 * (disables the PIR limit) and clear the enabled flag.
 */
207 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
208 					  struct tc_cls_matchall_offload *cls)
210 	struct netlink_ext_ack *extack = cls->common.extack;
213 	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
214 		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
218 	err = otx2_set_matchall_egress_rate(nic, 0, 0);
219 	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
/* Translate tc flower actions into an NPC install-flow request.
 * Supported: drop, accept, redirect-ingress (same PF only), vlan pop.
 * Fills req->op / req->vf / vtag fields as appropriate.
 */
223 static int otx2_tc_parse_actions(struct otx2_nic *nic,
224 				 struct flow_action *flow_action,
225 				 struct npc_install_flow_req *req)
227 	struct flow_action_entry *act;
228 	struct net_device *target;
229 	struct otx2_nic *priv;
232 	if (!flow_action_has_entries(flow_action)) {
233 		netdev_info(nic->netdev, "no tc actions specified");
237 	flow_action_for_each(i, act, flow_action) {
239 		case FLOW_ACTION_DROP:
240 			req->op = NIX_RX_ACTIONOP_DROP;
242 		case FLOW_ACTION_ACCEPT:
243 			req->op = NIX_RX_ACTION_DEFAULT;
245 		case FLOW_ACTION_REDIRECT_INGRESS:
247 			priv = netdev_priv(target);
248 			/* npc_install_flow_req doesn't support passing a target pcifunc */
249 			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
250 				netdev_info(nic->netdev,
251 					    "can't redirect to other pf/vf\n");
/* Redirect within the same PF: address the target VF by its func bits. */
254 			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
255 			req->op = NIX_RX_ACTION_DEFAULT;
257 		case FLOW_ACTION_VLAN_POP:
258 			req->vtag0_valid = true;
259 			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
260 			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
/* Parse a tc flower classifier into the NPC flow spec/mask inside
 * 'req'. Rejects dissector keys the MCAM cannot match on, then copies
 * each supported key (ethertype, ip proto, MACs, IP tos, VLAN, IPv4/v6
 * addresses, L4 ports) into req->packet / req->mask and sets the
 * corresponding NPC feature bit. Finishes by parsing the actions.
 */
270 static int otx2_tc_prepare_flow(struct otx2_nic *nic,
271 				struct flow_cls_offload *f,
272 				struct npc_install_flow_req *req)
274 	struct flow_msg *flow_spec = &req->packet;
275 	struct flow_msg *flow_mask = &req->mask;
276 	struct flow_dissector *dissector;
277 	struct flow_rule *rule;
280 	rule = flow_cls_offload_flow_rule(f);
281 	dissector = rule->match.dissector;
/* Any key outside this allow-list cannot be offloaded. */
283 	if ((dissector->used_keys &
284 	     ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
285 	       BIT(FLOW_DISSECTOR_KEY_BASIC) |
286 	       BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
287 	       BIT(FLOW_DISSECTOR_KEY_VLAN) |
288 	       BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
289 	       BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
290 	       BIT(FLOW_DISSECTOR_KEY_PORTS) |
291 	       BIT(FLOW_DISSECTOR_KEY_IP)))) {
292 		netdev_info(nic->netdev, "unsupported flow used key 0x%x",
293 			    dissector->used_keys);
297 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
298 		struct flow_match_basic match;
300 		flow_rule_match_basic(rule, &match);
302 		/* All EtherTypes can be matched, no hw limitation */
303 		flow_spec->etype = match.key->n_proto;
304 		flow_mask->etype = match.mask->n_proto;
305 		req->features |= BIT_ULL(NPC_ETYPE);
/* Only the L4/ICMP protocols below have NPC feature bits. */
307 		if (match.mask->ip_proto &&
308 		    (match.key->ip_proto != IPPROTO_TCP &&
309 		     match.key->ip_proto != IPPROTO_UDP &&
310 		     match.key->ip_proto != IPPROTO_SCTP &&
311 		     match.key->ip_proto != IPPROTO_ICMP &&
312 		     match.key->ip_proto != IPPROTO_ICMPV6)) {
313 			netdev_info(nic->netdev,
314 				    "ip_proto=0x%x not supported\n",
315 				    match.key->ip_proto);
318 		if (match.mask->ip_proto)
319 			ip_proto = match.key->ip_proto;
321 		if (ip_proto == IPPROTO_UDP)
322 			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
323 		else if (ip_proto == IPPROTO_TCP)
324 			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
325 		else if (ip_proto == IPPROTO_SCTP)
326 			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
327 		else if (ip_proto == IPPROTO_ICMP)
328 			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
329 		else if (ip_proto == IPPROTO_ICMPV6)
330 			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
333 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
334 		struct flow_match_eth_addrs match;
336 		flow_rule_match_eth_addrs(rule, &match);
/* Only destination MAC matching is supported by this MCAM profile. */
337 		if (!is_zero_ether_addr(match.mask->src)) {
338 			netdev_err(nic->netdev, "src mac match not supported\n");
342 		if (!is_zero_ether_addr(match.mask->dst)) {
343 			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
344 			ether_addr_copy(flow_mask->dmac,
345 					(u8 *)&match.mask->dst);
346 			req->features |= BIT_ULL(NPC_DMAC);
350 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
351 		struct flow_match_ip match;
353 		flow_rule_match_ip(rule, &match);
/* tos is matched for IPv4 only; ttl matching is unsupported. */
354 		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
356 			netdev_err(nic->netdev, "tos not supported\n");
359 		if (match.mask->ttl) {
360 			netdev_err(nic->netdev, "ttl not supported\n");
363 		flow_spec->tos = match.key->tos;
364 		flow_mask->tos = match.mask->tos;
365 		req->features |= BIT_ULL(NPC_TOS);
368 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
369 		struct flow_match_vlan match;
370 		u16 vlan_tci, vlan_tci_mask;
372 		flow_rule_match_vlan(rule, &match);
/* Only 802.1Q outer VLAN is supported. */
374 		if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
375 			netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
376 				   ntohs(match.key->vlan_tpid));
380 		if (match.mask->vlan_id ||
381 		    match.mask->vlan_dei ||
382 		    match.mask->vlan_priority) {
/* Rebuild the 16-bit TCI: id [11:0], DEI [12], priority [15:13]. */
383 			vlan_tci = match.key->vlan_id |
384 				   match.key->vlan_dei << 12 |
385 				   match.key->vlan_priority << 13;
/* NOTE(review): the mask is built from match.key->vlan_dei and
 * match.key->vlan_priority rather than the match.mask-> fields —
 * looks like a copy-paste bug; confirm against the mainline fix.
 */
387 			vlan_tci_mask = match.mask->vlan_id |
388 					match.key->vlan_dei << 12 |
389 					match.key->vlan_priority << 13;
391 			flow_spec->vlan_tci = htons(vlan_tci);
392 			flow_mask->vlan_tci = htons(vlan_tci_mask);
393 			req->features |= BIT_ULL(NPC_OUTER_VID);
397 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
398 		struct flow_match_ipv4_addrs match;
400 		flow_rule_match_ipv4_addrs(rule, &match);
402 		flow_spec->ip4dst = match.key->dst;
403 		flow_mask->ip4dst = match.mask->dst;
404 		req->features |= BIT_ULL(NPC_DIP_IPV4);
406 		flow_spec->ip4src = match.key->src;
407 		flow_mask->ip4src = match.mask->src;
408 		req->features |= BIT_ULL(NPC_SIP_IPV4);
409 	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
410 		struct flow_match_ipv6_addrs match;
412 		flow_rule_match_ipv6_addrs(rule, &match);
414 		if (ipv6_addr_loopback(&match.key->dst) ||
415 		    ipv6_addr_loopback(&match.key->src)) {
416 			netdev_err(nic->netdev,
417 				   "Flow matching on IPv6 loopback addr is not supported\n");
421 		if (!ipv6_addr_any(&match.mask->dst)) {
422 			memcpy(&flow_spec->ip6dst,
423 			       (struct in6_addr *)&match.key->dst,
424 			       sizeof(flow_spec->ip6dst));
425 			memcpy(&flow_mask->ip6dst,
426 			       (struct in6_addr *)&match.mask->dst,
427 			       sizeof(flow_spec->ip6dst));
428 			req->features |= BIT_ULL(NPC_DIP_IPV6);
431 		if (!ipv6_addr_any(&match.mask->src)) {
432 			memcpy(&flow_spec->ip6src,
433 			       (struct in6_addr *)&match.key->src,
434 			       sizeof(flow_spec->ip6src));
435 			memcpy(&flow_mask->ip6src,
436 			       (struct in6_addr *)&match.mask->src,
437 			       sizeof(flow_spec->ip6src));
438 			req->features |= BIT_ULL(NPC_SIP_IPV6);
442 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
443 		struct flow_match_ports match;
445 		flow_rule_match_ports(rule, &match);
/* Port feature bits are per-protocol; ip_proto was set above. */
447 		flow_spec->dport = match.key->dst;
448 		flow_mask->dport = match.mask->dst;
449 		if (ip_proto == IPPROTO_UDP)
450 			req->features |= BIT_ULL(NPC_DPORT_UDP);
451 		else if (ip_proto == IPPROTO_TCP)
452 			req->features |= BIT_ULL(NPC_DPORT_TCP);
453 		else if (ip_proto == IPPROTO_SCTP)
454 			req->features |= BIT_ULL(NPC_DPORT_SCTP);
456 		flow_spec->sport = match.key->src;
457 		flow_mask->sport = match.mask->src;
458 		if (ip_proto == IPPROTO_UDP)
459 			req->features |= BIT_ULL(NPC_SPORT_UDP);
460 		else if (ip_proto == IPPROTO_TCP)
461 			req->features |= BIT_ULL(NPC_SPORT_TCP);
462 		else if (ip_proto == IPPROTO_SCTP)
463 			req->features |= BIT_ULL(NPC_SPORT_SCTP);
466 	return otx2_tc_parse_actions(nic, &rule->action, req);
/* Ask the AF, via mailbox, to delete a single MCAM flow entry.
 * Returns 0 on success or a negative errno.
 */
469 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
471 	struct npc_delete_flow_req *req;
474 	mutex_lock(&nic->mbox.lock);
475 	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
/* Allocation failed: release the mbox lock before returning. */
477 	mutex_unlock(&nic->mbox.lock);
483 	/* Send message to AF */
484 	err = otx2_sync_mbox_msg(&nic->mbox);
486 		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
488 		mutex_unlock(&nic->mbox.lock);
491 	mutex_unlock(&nic->mbox.lock);
/* Tear down an offloaded tc flower flow: look it up by cookie, delete
 * the HW MCAM entry, unhash the node, free it via RCU and release its
 * bitmap slot.
 */
496 static int otx2_tc_del_flow(struct otx2_nic *nic,
497 			    struct flow_cls_offload *tc_flow_cmd)
499 	struct otx2_tc_info *tc_info = &nic->tc_info;
500 	struct otx2_tc_flow *flow_node;
502 	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
503 					   &tc_flow_cmd->cookie,
504 					   tc_info->flow_ht_params);
506 		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
507 			   tc_flow_cmd->cookie);
511 	otx2_del_mcam_flow_entry(nic, flow_node->entry);
513 	WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
515 				       nic->tc_info.flow_ht_params));
516 	kfree_rcu(flow_node, rcu);
/* NOTE(review): flow_node->bitpos is read after kfree_rcu(). The
 * object outlives the call until a grace period elapses, so this is
 * not an immediate use-after-free, but reading bitpos before queuing
 * the free would be cleaner — consider reordering.
 */
518 	clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
519 	tc_info->num_entries--;
/* Offload a new tc flower flow: build the NPC install request from the
 * classifier, pick a free MCAM slot from the tc bitmap, send the
 * request to the AF, then hash the bookkeeping node by cookie.
 */
524 static int otx2_tc_add_flow(struct otx2_nic *nic,
525 			    struct flow_cls_offload *tc_flow_cmd)
527 	struct otx2_tc_info *tc_info = &nic->tc_info;
528 	struct otx2_tc_flow *new_node, *old_node;
529 	struct npc_install_flow_req *req;
532 	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
535 	/* allocate memory for the new flow and it's node */
536 	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
539 	spin_lock_init(&new_node->lock);
540 	new_node->cookie = tc_flow_cmd->cookie;
542 	mutex_lock(&nic->mbox.lock);
543 	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
545 		mutex_unlock(&nic->mbox.lock);
549 	rc = otx2_tc_prepare_flow(nic, tc_flow_cmd, req);
/* Parse failed: discard the queued mailbox message and bail out. */
551 		otx2_mbox_reset(&nic->mbox.mbox, 0);
552 		mutex_unlock(&nic->mbox.lock);
556 	/* If a flow exists with the same cookie, delete it */
557 	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
558 					  &tc_flow_cmd->cookie,
559 					  tc_info->flow_ht_params);
561 		otx2_tc_del_flow(nic, tc_flow_cmd);
563 	if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
564 		netdev_err(nic->netdev, "Not enough MCAM space to add the flow\n");
565 		otx2_mbox_reset(&nic->mbox.mbox, 0);
566 		mutex_unlock(&nic->mbox.lock);
570 	new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
571 					       nic->flow_cfg->tc_max_flows);
572 	req->channel = nic->hw.rx_chan_base;
/* NOTE(review): with bitpos == 0 this indexes
 * entry[tc_flower_offset + tc_max_flows], i.e. one past the tc range —
 * looks off-by-one; verify against the flow_cfg->entry[] layout.
 */
573 	req->entry = nic->flow_cfg->entry[nic->flow_cfg->tc_flower_offset +
574 					  nic->flow_cfg->tc_max_flows - new_node->bitpos];
575 	req->intf = NIX_INTF_RX;
577 	new_node->entry = req->entry;
579 	/* Send message to AF */
580 	rc = otx2_sync_mbox_msg(&nic->mbox);
582 		netdev_err(nic->netdev, "Failed to install MCAM flow entry\n");
583 		mutex_unlock(&nic->mbox.lock);
586 	mutex_unlock(&nic->mbox.lock);
588 	/* add new flow to flow-table */
589 	rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
590 				    nic->tc_info.flow_ht_params);
/* Hash insert failed: roll back the HW entry and free the node. */
592 		otx2_del_mcam_flow_entry(nic, req->entry);
593 		kfree_rcu(new_node, rcu);
597 	set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
598 	tc_info->num_entries++;
/* Fetch the HW packet counter for a flow's MCAM entry and report the
 * delta since the last query to the tc core via flow_stats_update().
 */
603 static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
604 				  struct flow_cls_offload *tc_flow_cmd)
606 	struct otx2_tc_info *tc_info = &nic->tc_info;
607 	struct npc_mcam_get_stats_req *req;
608 	struct npc_mcam_get_stats_rsp *rsp;
609 	struct otx2_tc_flow_stats *stats;
610 	struct otx2_tc_flow *flow_node;
613 	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
614 					   &tc_flow_cmd->cookie,
615 					   tc_info->flow_ht_params);
617 		netdev_info(nic->netdev, "tc flow not found for cookie %lx",
618 			    tc_flow_cmd->cookie);
622 	mutex_lock(&nic->mbox.lock);
624 	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
626 		mutex_unlock(&nic->mbox.lock);
630 	req->entry = flow_node->entry;
632 	err = otx2_sync_mbox_msg(&nic->mbox);
634 		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
636 		mutex_unlock(&nic->mbox.lock);
/* Pull the typed response for our request out of the shared mbox. */
640 	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
641 		(&nic->mbox.mbox, 0, &req->hdr);
643 		mutex_unlock(&nic->mbox.lock);
647 	mutex_unlock(&nic->mbox.lock);
652 	stats = &flow_node->stats;
/* HW counter is cumulative; report only the delta since last read. */
654 	spin_lock(&flow_node->lock);
655 	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
656 			  FLOW_ACTION_HW_STATS_IMMEDIATE);
657 	stats->pkts = rsp->stat;
658 	spin_unlock(&flow_node->lock);
/* Dispatch a flower classifier command (replace/destroy/stats) to the
 * matching handler.
 */
663 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
664 				    struct flow_cls_offload *cls_flower)
666 	switch (cls_flower->command) {
667 	case FLOW_CLS_REPLACE:
668 		return otx2_tc_add_flow(nic, cls_flower);
669 	case FLOW_CLS_DESTROY:
670 		return otx2_tc_del_flow(nic, cls_flower);
672 		return otx2_tc_get_flow_stats(nic, cls_flower);
/* Ingress flow-block callback: only chain-0 flower offloads are
 * accepted; cb_priv is the otx2_nic registered with the block.
 */
678 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
679 					  void *type_data, void *cb_priv)
681 	struct otx2_nic *nic = cb_priv;
683 	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
687 	case TC_SETUP_CLSFLOWER:
688 		return otx2_setup_tc_cls_flower(nic, type_data);
/* Dispatch an egress matchall command (replace/destroy/stats) to the
 * matching handler.
 */
696 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
697 					 struct tc_cls_matchall_offload *cls_matchall)
699 	switch (cls_matchall->command) {
700 	case TC_CLSMATCHALL_REPLACE:
701 		return otx2_tc_egress_matchall_install(nic, cls_matchall);
702 	case TC_CLSMATCHALL_DESTROY:
703 		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
704 	case TC_CLSMATCHALL_STATS:
/* Egress flow-block callback: only chain-0 matchall offloads are
 * accepted; cb_priv is the otx2_nic registered with the block.
 */
712 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
713 					 void *type_data, void *cb_priv)
715 	struct otx2_nic *nic = cb_priv;
717 	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
721 	case TC_SETUP_CLSMATCHALL:
722 		return otx2_setup_tc_egress_matchall(nic, type_data);
/* Driver-wide list of registered flow block callbacks, shared by
 * flow_block_cb_setup_simple() for ingress and egress blocks.
 */
730 static LIST_HEAD(otx2_block_cb_list);
/* Bind a tc flow block to this netdev, choosing the ingress or egress
 * callback based on the binder type; other binder types are rejected.
 */
732 static int otx2_setup_tc_block(struct net_device *netdev,
733 			       struct flow_block_offload *f)
735 	struct otx2_nic *nic = netdev_priv(netdev);
742 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
743 		cb = otx2_setup_tc_block_ingress_cb;
745 	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
746 		cb = otx2_setup_tc_block_egress_cb;
752 	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
/* ndo_setup_tc entry point: route TC_SETUP_BLOCK requests to the
 * flow-block setup; other setup types are handled/rejected elsewhere
 * (elided from this view).
 */
756 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
761 		return otx2_setup_tc_block(netdev, type_data);
/* rhashtable layout for the tc flow table: nodes are otx2_tc_flow,
 * keyed by the tc classifier cookie.
 */
767 static const struct rhashtable_params tc_flow_ht_params = {
768 	.head_offset = offsetof(struct otx2_tc_flow, node),
769 	.key_offset = offsetof(struct otx2_tc_flow, cookie),
770 	.key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
771 	.automatic_shrinking = true,
/* Initialize tc offload state: copy the hashtable parameters and set up
 * the flow table. Returns the rhashtable_init() result.
 */
774 int otx2_init_tc(struct otx2_nic *nic)
776 	struct otx2_tc_info *tc = &nic->tc_info;
778 	tc->flow_ht_params = tc_flow_ht_params;
779 	return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
/* Tear down tc offload state; destroys the flow hashtable. Callers are
 * expected to have removed/flushed flows beforehand.
 */
782 void otx2_shutdown_tc(struct otx2_nic *nic)
784 	struct otx2_tc_info *tc = &nic->tc_info;
786 	rhashtable_destroy(&tc->flow_table);