1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright 2020, NXP Semiconductors
5 #include "sja1105_vl.h"
/* Look up an offloaded rule by its tc-flower cookie.
 *
 * Linearly scans priv->flow_block.rules comparing each rule's cookie
 * against @cookie.
 * NOTE(review): the function's return statements (matching rule vs.
 * not-found) are on lines missing from this excerpt — presumably the
 * matching rule is returned, NULL otherwise; confirm against the full
 * file.
 */
7 struct sja1105_rule *sja1105_rule_find(struct sja1105_private *priv,
10 struct sja1105_rule *rule;
12 list_for_each_entry(rule, &priv->flow_block.rules, list)
13 if (rule->cookie == cookie)
/* Find the first unused entry in the L2 Policing Table.
 *
 * Scans l2_policer_used[0 .. SJA1105_NUM_L2_POLICERS - 1] for a slot
 * not yet claimed by a port default or an offloaded rule.
 * NOTE(review): the "found"/"exhausted" return statements are missing
 * from this excerpt; the callers below treat -1 as "no policer free",
 * so that is presumably the failure return.
 */
19 static int sja1105_find_free_l2_policer(struct sja1105_private *priv)
23 for (i = 0; i < SJA1105_NUM_L2_POLICERS; i++)
24 if (!priv->flow_block.l2_policer_used[i])
/* Install, or extend to another port, a broadcast policer.
 *
 * If no rule exists for @cookie yet, allocate a new
 * SJA1105_RULE_BCAST_POLICER rule and reserve a free L2 Policing Table
 * entry for it (sharindx); otherwise @port simply joins the existing
 * rule's port_mask so all attached ports share one policer.
 * Returns 0 on success or a negative error code (error paths and
 * cleanup gotos fall on lines missing from this excerpt).
 */
30 static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
31 struct netlink_ext_ack *extack,
32 unsigned long cookie, int port,
33 u64 rate_bytes_per_sec,
36 struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
37 struct sja1105_l2_policing_entry *policing;
38 struct dsa_switch *ds = priv->ds;
39 bool new_rule = false;
/* First user of this cookie: create the rule.
 * NOTE(review): the "if (!rule)" guard and the kzalloc NULL check are
 * on lines missing from this excerpt.
 */
44 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
48 rule->cookie = cookie;
49 rule->type = SJA1105_RULE_BCAST_POLICER;
/* Claim a free policer slot; -1 means the table is exhausted. */
50 rule->bcast_pol.sharindx = sja1105_find_free_l2_policer(priv);
51 rule->key.type = SJA1105_KEY_BCAST;
55 if (rule->bcast_pol.sharindx == -1) {
56 NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
61 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
/* The per-port broadcast policer lives at index
 * (num_ports * SJA1105_NUM_TC) + port; if its sharindx no longer
 * points at the port's own default policer, something else already
 * claimed it.
 */
63 if (policing[(ds->num_ports * SJA1105_NUM_TC) + port].sharindx != port) {
64 NL_SET_ERR_MSG_MOD(extack,
65 "Port already has a broadcast policer");
70 rule->port_mask |= BIT(port);
72 /* Make the broadcast policers of all ports attached to this block
73 * point to the newly allocated policer
75 for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
76 int bcast = (ds->num_ports * SJA1105_NUM_TC) + p;
78 policing[bcast].sharindx = rule->bcast_pol.sharindx;
/* Program rate/burst into the shared policer entry.
 * NOTE(review): the div_u64() divisor (presumably a rate-unit
 * conversion) is on a line missing from this excerpt.
 */
81 policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
83 policing[rule->bcast_pol.sharindx].smax = burst;
85 /* TODO: support per-flow MTU */
86 policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
/* Policing table changes require a switch reset/reload. */
89 rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
/* Commit bookkeeping only once the hardware accepted the config;
 * on failure a newly created rule is presumably freed (cleanup lines
 * missing from this excerpt).
 */
92 if (rc == 0 && new_rule) {
93 priv->flow_block.l2_policer_used[rule->bcast_pol.sharindx] = true;
94 list_add(&rule->list, &priv->flow_block.rules);
95 } else if (new_rule) {
/* Install, or extend to another port, a policer for one traffic class.
 *
 * Mirrors sja1105_setup_bcast_policer(), but keys on the (port, tc)
 * pair: the default policer for that pair sits at index
 * (port * SJA1105_NUM_TC) + tc, and all ports attached to the same
 * rule share one allocated policer for traffic class @tc.
 * Returns 0 on success or a negative error code (error paths and
 * cleanup gotos fall on lines missing from this excerpt).
 */
102 static int sja1105_setup_tc_policer(struct sja1105_private *priv,
103 struct netlink_ext_ack *extack,
104 unsigned long cookie, int port, int tc,
105 u64 rate_bytes_per_sec,
108 struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
109 struct sja1105_l2_policing_entry *policing;
110 bool new_rule = false;
/* First user of this cookie: create the rule.
 * NOTE(review): the "if (!rule)" guard and kzalloc NULL check are on
 * lines missing from this excerpt.
 */
115 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
119 rule->cookie = cookie;
120 rule->type = SJA1105_RULE_TC_POLICER;
/* Claim a free policer slot; -1 means the table is exhausted. */
121 rule->tc_pol.sharindx = sja1105_find_free_l2_policer(priv);
122 rule->key.type = SJA1105_KEY_TC;
123 rule->key.tc.pcp = tc;
127 if (rule->tc_pol.sharindx == -1) {
128 NL_SET_ERR_MSG_MOD(extack, "No more L2 policers free");
133 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
/* If the (port, tc) entry's sharindx no longer points at the port's
 * own default policer, another rule already owns this pair.
 */
135 if (policing[(port * SJA1105_NUM_TC) + tc].sharindx != port) {
136 NL_SET_ERR_MSG_MOD(extack,
137 "Port-TC pair already has an L2 policer");
142 rule->port_mask |= BIT(port);
144 /* Make the policers for traffic class @tc of all ports attached to
145 * this block point to the newly allocated policer
147 for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
148 int index = (p * SJA1105_NUM_TC) + tc;
150 policing[index].sharindx = rule->tc_pol.sharindx;
/* Program rate/burst into the shared policer entry.
 * NOTE(review): the div_u64() divisor is on a line missing from this
 * excerpt.
 */
153 policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
155 policing[rule->tc_pol.sharindx].smax = burst;
157 /* TODO: support per-flow MTU */
158 policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
/* Policing table changes require a switch reset/reload. */
161 rc = sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
/* Commit bookkeeping only once the hardware accepted the config. */
164 if (rc == 0 && new_rule) {
165 priv->flow_block.l2_policer_used[rule->tc_pol.sharindx] = true;
166 list_add(&rule->list, &priv->flow_block.rules);
167 } else if (new_rule) {
/* Dispatch a FLOW_ACTION_POLICE request to the proper policer setup
 * routine based on the parsed key type: broadcast DMAC keys get a
 * per-port broadcast policer, PCP keys get a per-(port, tc) policer.
 * Any other key type is rejected via extack.
 * NOTE(review): the switch statement itself, the SJA1105_KEY_TC case
 * label and the default-case return are on lines missing from this
 * excerpt.
 */
174 static int sja1105_flower_policer(struct sja1105_private *priv, int port,
175 struct netlink_ext_ack *extack,
176 unsigned long cookie,
177 struct sja1105_key *key,
178 u64 rate_bytes_per_sec,
182 case SJA1105_KEY_BCAST:
183 return sja1105_setup_bcast_policer(priv, extack, cookie, port,
184 rate_bytes_per_sec, burst);
186 return sja1105_setup_tc_policer(priv, extack, cookie, port,
187 key->tc.pcp, rate_bytes_per_sec,
190 NL_SET_ERR_MSG_MOD(extack, "Unknown keys for policing");
/* Translate a flower classifier's match into a struct sja1105_key.
 *
 * Only BASIC/CONTROL/VLAN/ETH_ADDRS dissector keys are accepted, and
 * each with restrictions (no protocol match, no source-MAC match, no
 * masked DMAC/VID/PCP).  The final classification uses sentinel values
 * (dmac == U64_MAX, vid/pcp == U16_MAX appear to mean "not matched" —
 * their initializers are on lines missing from this excerpt):
 *   broadcast DMAC, no VID/PCP  -> SJA1105_KEY_BCAST
 *   PCP only                    -> SJA1105_KEY_TC
 *   DMAC + VID + PCP            -> SJA1105_KEY_VLAN_AWARE_VL
 *   DMAC only                   -> SJA1105_KEY_VLAN_UNAWARE_VL
 * Anything else is rejected via extack.
 */
195 static int sja1105_flower_parse_key(struct sja1105_private *priv,
196 struct netlink_ext_ack *extack,
197 struct flow_cls_offload *cls,
198 struct sja1105_key *key)
200 struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
201 struct flow_dissector *dissector = rule->match.dissector;
202 bool is_bcast_dmac = false;
/* Reject any dissector key outside the supported set. */
207 if (dissector->used_keys &
208 ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
209 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
210 BIT(FLOW_DISSECTOR_KEY_VLAN) |
211 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
212 NL_SET_ERR_MSG_MOD(extack,
213 "Unsupported keys used");
/* A nonzero n_proto means the filter matches on EtherType/protocol,
 * which the hardware key cannot express.
 */
217 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
218 struct flow_match_basic match;
220 flow_rule_match_basic(rule, &match);
221 if (match.key->n_proto) {
222 NL_SET_ERR_MSG_MOD(extack,
223 "Matching on protocol not supported");
228 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
229 u8 bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
230 u8 null[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
231 struct flow_match_eth_addrs match;
233 flow_rule_match_eth_addrs(rule, &match);
/* Source MAC must be fully unmatched (mask argument is on a
 * missing line); only exact (unmasked) DMAC matches are allowed.
 */
235 if (!ether_addr_equal_masked(match.key->src, null,
237 NL_SET_ERR_MSG_MOD(extack,
238 "Matching on source MAC not supported");
242 if (!ether_addr_equal(match.mask->dst, bcast)) {
243 NL_SET_ERR_MSG_MOD(extack,
244 "Masked matching on MAC not supported");
248 dmac = ether_addr_to_u64(match.key->dst);
249 is_bcast_dmac = ether_addr_equal(match.key->dst, bcast);
252 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
253 struct flow_match_vlan match;
255 flow_rule_match_vlan(rule, &match);
/* VID and PCP may each be matched only exactly (full mask) or
 * not at all.
 */
257 if (match.mask->vlan_id &&
258 match.mask->vlan_id != VLAN_VID_MASK) {
259 NL_SET_ERR_MSG_MOD(extack,
260 "Masked matching on VID is not supported");
264 if (match.mask->vlan_priority &&
265 match.mask->vlan_priority != 0x7) {
266 NL_SET_ERR_MSG_MOD(extack,
267 "Masked matching on PCP is not supported");
271 if (match.mask->vlan_id)
272 vid = match.key->vlan_id;
273 if (match.mask->vlan_priority)
274 pcp = match.key->vlan_priority;
/* Classification: see function header for the key-type mapping.
 * The key field assignments after each type are on missing lines.
 */
277 if (is_bcast_dmac && vid == U16_MAX && pcp == U16_MAX) {
278 key->type = SJA1105_KEY_BCAST;
281 if (dmac == U64_MAX && vid == U16_MAX && pcp != U16_MAX) {
282 key->type = SJA1105_KEY_TC;
286 if (dmac != U64_MAX && vid != U16_MAX && pcp != U16_MAX) {
287 key->type = SJA1105_KEY_VLAN_AWARE_VL;
293 if (dmac != U64_MAX) {
294 key->type = SJA1105_KEY_VLAN_UNAWARE_VL;
299 NL_SET_ERR_MSG_MOD(extack, "Not matching on any known key");
/* DSA .cls_flower_add hook: offload a tc-flower filter.
 *
 * Parses the match into an sja1105_key, then walks the action list:
 * POLICE maps to an L2 policer, TRAP redirects to the upstream (CPU)
 * port, REDIRECT/DROP become virtual-link (VL) rules, and GATE
 * programs a time gate.  Gate rules additionally require a
 * redirect/trap in the same filter so DESTPORTS is populated before
 * scheduling is initialized.
 * NOTE(review): the action switch statement, several flag assignments
 * (vl_rule/routing_rule/gate_rule) and the error/exit paths are on
 * lines missing from this excerpt.
 */
303 int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
304 struct flow_cls_offload *cls, bool ingress)
306 struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
307 struct netlink_ext_ack *extack = cls->common.extack;
308 struct sja1105_private *priv = ds->priv;
309 const struct flow_action_entry *act;
310 unsigned long cookie = cls->cookie;
311 bool routing_rule = false;
312 struct sja1105_key key;
313 bool gate_rule = false;
314 bool vl_rule = false;
317 rc = sja1105_flower_parse_key(priv, extack, cls, &key);
321 flow_action_for_each(i, act, &rule->action) {
323 case FLOW_ACTION_POLICE:
/* Hardware polices in bytes/s only, not packets/s. */
324 if (act->police.rate_pkt_ps) {
325 NL_SET_ERR_MSG_MOD(extack,
326 "QoS offload not support packets per second");
331 rc = sja1105_flower_policer(priv, port, extack, cookie,
333 act->police.rate_bytes_ps,
/* Trap: redirect the flow to the upstream (CPU-facing) port. */
338 case FLOW_ACTION_TRAP: {
339 int cpu = dsa_upstream_port(ds, port);
344 rc = sja1105_vl_redirect(priv, port, extack, cookie,
345 &key, BIT(cpu), true);
350 case FLOW_ACTION_REDIRECT: {
351 struct dsa_port *to_dp;
/* The destination netdev must resolve to a port of a DSA switch. */
353 to_dp = dsa_port_from_netdev(act->dev);
355 NL_SET_ERR_MSG_MOD(extack,
356 "Destination not a switch port");
363 rc = sja1105_vl_redirect(priv, port, extack, cookie,
364 &key, BIT(to_dp->index), true);
/* Drop: a VL redirect whose destination port mask is presumably
 * empty (the mask argument is on a missing line).
 */
369 case FLOW_ACTION_DROP:
372 rc = sja1105_vl_redirect(priv, port, extack, cookie,
377 case FLOW_ACTION_GATE:
381 rc = sja1105_vl_gate(priv, port, extack, cookie,
382 &key, act->gate.index,
386 act->gate.cycletimeext,
387 act->gate.num_entries,
393 NL_SET_ERR_MSG_MOD(extack,
394 "Action not supported");
400 if (vl_rule && !rc) {
401 /* Delay scheduling configuration until DESTPORTS has been
402 * populated by all other actions.
/* A gate without a companion redirect/trap has nowhere to send
 * traffic (guard condition on a missing line).
 */
406 NL_SET_ERR_MSG_MOD(extack,
407 "Can only offload gate action together with redirect or trap");
410 rc = sja1105_init_scheduling(priv);
/* VL table changes require a switch reset/reload. */
415 rc = sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
/* DSA .cls_flower_del hook: remove an offloaded filter by cookie.
 *
 * Virtual-link rules are delegated to sja1105_vl_delete().  Policer
 * rules re-point the port's policing entry back at its own default
 * policer (sharindx == port); once no ports reference the rule, the
 * shared policer slot is released and the rule unlinked.
 * NOTE(review): the "rule not found" early return, the non-policer
 * fallthrough and the kfree of the rule are on lines missing from this
 * excerpt.
 */
422 int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
423 struct flow_cls_offload *cls, bool ingress)
425 struct sja1105_private *priv = ds->priv;
426 struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
427 struct sja1105_l2_policing_entry *policing;
433 if (rule->type == SJA1105_RULE_VL)
434 return sja1105_vl_delete(priv, port, rule, cls->common.extack);
436 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
438 if (rule->type == SJA1105_RULE_BCAST_POLICER) {
/* Broadcast policer entry for this port; see setup_bcast_policer. */
439 int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
441 old_sharindx = policing[bcast].sharindx;
442 policing[bcast].sharindx = port;
443 } else if (rule->type == SJA1105_RULE_TC_POLICER) {
/* Per-(port, tc) policer entry; see setup_tc_policer. */
444 int index = (port * SJA1105_NUM_TC) + rule->key.tc.pcp;
446 old_sharindx = policing[index].sharindx;
447 policing[index].sharindx = port;
452 rule->port_mask &= ~BIT(port);
453 if (!rule->port_mask) {
/* Last port gone: free the shared policer slot and the rule. */
454 priv->flow_block.l2_policer_used[old_sharindx] = false;
455 list_del(&rule->list);
/* Push the updated policing table to hardware. */
459 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
/* DSA .cls_flower_stats hook: report counters for an offloaded filter.
 *
 * Only virtual-link rules have hardware counters; other rule types are
 * skipped (the early-return value for non-VL rules is on a missing
 * line).  Stats collection is delegated to sja1105_vl_stats().
 */
462 int sja1105_cls_flower_stats(struct dsa_switch *ds, int port,
463 struct flow_cls_offload *cls, bool ingress)
465 struct sja1105_private *priv = ds->priv;
466 struct sja1105_rule *rule = sja1105_rule_find(priv, cls->cookie);
472 if (rule->type != SJA1105_RULE_VL)
475 rc = sja1105_vl_stats(priv, port, rule, &cls->stats,
/* Initialize flower-offload state at switch setup time.
 *
 * Starts with an empty rule list and marks policer slots
 * 0 .. num_ports-1 as used: those entries serve as each port's default
 * policer (sharindx == port), so they must never be handed out by
 * sja1105_find_free_l2_policer().
 */
483 void sja1105_flower_setup(struct dsa_switch *ds)
485 struct sja1105_private *priv = ds->priv;
488 INIT_LIST_HEAD(&priv->flow_block.rules);
490 for (port = 0; port < ds->num_ports; port++)
491 priv->flow_block.l2_policer_used[port] = true;
/* Release all remaining offloaded rules at switch teardown.
 *
 * Uses the _safe list walk because each node is unlinked while
 * iterating.  NOTE(review): this excerpt ends at list_del(); the
 * freeing of each rule presumably follows on lines past the end of the
 * visible text.
 */
494 void sja1105_flower_teardown(struct dsa_switch *ds)
496 struct sja1105_private *priv = ds->priv;
497 struct sja1105_rule *rule;
498 struct list_head *pos, *n;
500 list_for_each_safe(pos, n, &priv->flow_block.rules) {
501 rule = list_entry(pos, struct sja1105_rule, list);
502 list_del(&rule->list);