1 // SPDX-License-Identifier: GPL-2.0
/* Copyright 2020, NXP Semiconductors
 */
4 #include <net/tc_act/tc_gate.h>
5 #include <linux/dsa/8021q.h>
6 #include "sja1105_vl.h"
8 #define SJA1105_SIZE_VL_STATUS 8
10 /* The switch flow classification core implements TTEthernet, which 'thinks' in
11 * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
12 * However it also has one other operating mode (VLLUPFORMAT=0) where it acts
13 * somewhat closer to a pre-standard implementation of IEEE 802.1Qci
14 * (Per-Stream Filtering and Policing), which is what the driver is going to be
18 * Key = {DMAC && VLANID +---------+ Key = { (DMAC[47:16] & VLMASK ==
19 * && VLAN PCP | | VLMARKER)
20 * && INGRESS PORT} +---------+ (both fixed)
21 * (exact match, | && DMAC[15:0] == VLID
22 * all specified in rule) | (specified in rule)
25 * 0 (PSFP) / \ 1 (ARINC664)
26 * +-----------/ VLLUPFORMAT \----------+
29 * 0 (forwarding) v ------------ |
31 * / \ 1 (QoS classification) |
32 * +---/ ISCRITICAL \-----------+ |
33 * | \ (per rule) / | |
34 * | \ / VLID taken from VLID taken from
35 * v ------------ index of rule contents of rule
36 * select that matched that matched
38 * | +---------+--------+
44 * | +--------------| |
45 * | | select TYPE +---------+
47 * | 0 (rate ------------ 1 (time
48 * | constrained) / \ triggered)
49 * | +------/ TYPE \------------+
50 * | | \ (per VLID) / |
52 * | VL Policing ------------ VL Policing
53 * | (indexed by VLID) (indexed by VLID)
54 * | +---------+ +---------+
55 * | | TYPE=0 | | TYPE=1 |
56 * | +---------+ +---------+
57 * | select SHARINDX select SHARINDX to
58 * | to rate-limit re-enter VL Forwarding
59 * | groups of VL's with new VLID for egress
62 * | select MAXLEN -> exceed => drop select MAXLEN -> exceed => drop
65 * | VL Forwarding VL Forwarding
66 * | (indexed by SHARINDX) (indexed by SHARINDX)
67 * | +---------+ +---------+
68 * | | TYPE=0 | | TYPE=1 |
69 * | +---------+ +---------+
70 * | select PRIORITY, select PRIORITY,
71 * | PARTITION, DESTPORTS PARTITION, DESTPORTS
74 * | VL Policing VL Policing
75 * | (indexed by SHARINDX) (indexed by SHARINDX)
76 * | +---------+ +---------+
77 * | | TYPE=0 | | TYPE=1 |
78 * | +---------+ +---------+
81 * | select BAG, -> exceed => drop |
83 * | | ----------------------------------------------
84 * | | / Reception Window is open for this VL \
85 * | | / (the Schedule Table executes an entry i \
86 * | | / M <= i < N, for which these conditions hold): \ no
88 * | | |yes \ WINST[M] == 1 && WINSTINDEX[M] == VLID / |
89 * | | | \ WINEND[N] == 1 && WINSTINDEX[N] == VLID / |
91 * | | | \ (the VL window has opened and not yet closed)/ |
92 * | | | ---------------------------------------------- |
94 * | | dispatch to DESTPORTS when the Schedule Table drop
95 * | | executes an entry i with TXEN == 1 && VLINDEX == i
97 * dispatch immediately to DESTPORTS
99 * The per-port classification key is always composed of {DMAC, VID, PCP} and
100 * is non-maskable. This 'looks like' the NULL stream identification function
101 * from IEEE 802.1CB clause 6, except for the extra VLAN PCP. When the switch
102 * ports operate as VLAN-unaware, we do allow the user to not specify the VLAN
103 * ID and PCP, and then the port-based defaults will be used.
105 * In TTEthernet, routing is something that needs to be done manually for each
106 * Virtual Link. So the flow action must always include one of:
107 * a. 'redirect', 'trap' or 'drop': select the egress port list
108 * Additionally, the following actions may be applied on a Virtual Link,
109 * turning it into 'critical' traffic:
110 * b. 'police': turn it into a rate-constrained VL, with bandwidth limitation
111 * given by the maximum frame length, bandwidth allocation gap (BAG) and
 * c. 'gate': turn it into a time-triggered VL, which can be only be received
 *    and forwarded according to a given schedule.
 */
117 static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
118 struct sja1105_vl_lookup_entry *b)
120 if (a->macaddr < b->macaddr)
122 if (a->macaddr > b->macaddr)
124 if (a->vlanid < b->vlanid)
126 if (a->vlanid > b->vlanid)
128 if (a->port < b->port)
130 if (a->port > b->port)
132 if (a->vlanprior < b->vlanprior)
134 if (a->vlanprior > b->vlanprior)
140 static int sja1105_init_virtual_links(struct sja1105_private *priv,
141 struct netlink_ext_ack *extack)
143 struct sja1105_vl_policing_entry *vl_policing;
144 struct sja1105_vl_forwarding_entry *vl_fwd;
145 struct sja1105_vl_lookup_entry *vl_lookup;
146 bool have_critical_virtual_links = false;
147 struct sja1105_table *table;
148 struct sja1105_rule *rule;
149 int num_virtual_links = 0;
150 int max_sharindx = 0;
153 /* Figure out the dimensioning of the problem */
154 list_for_each_entry(rule, &priv->flow_block.rules, list) {
155 if (rule->type != SJA1105_RULE_VL)
157 /* Each VL lookup entry matches on a single ingress port */
158 num_virtual_links += hweight_long(rule->port_mask);
160 if (rule->vl.type != SJA1105_VL_NONCRITICAL)
161 have_critical_virtual_links = true;
162 if (max_sharindx < rule->vl.sharindx)
163 max_sharindx = rule->vl.sharindx;
166 if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
167 NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
171 if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
172 NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
176 max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;
178 /* Discard previous VL Lookup Table */
179 table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
180 if (table->entry_count) {
181 kfree(table->entries);
182 table->entry_count = 0;
185 /* Discard previous VL Policing Table */
186 table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
187 if (table->entry_count) {
188 kfree(table->entries);
189 table->entry_count = 0;
192 /* Discard previous VL Forwarding Table */
193 table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
194 if (table->entry_count) {
195 kfree(table->entries);
196 table->entry_count = 0;
199 /* Discard previous VL Forwarding Parameters Table */
200 table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
201 if (table->entry_count) {
202 kfree(table->entries);
203 table->entry_count = 0;
207 if (!num_virtual_links)
210 /* Pre-allocate space in the static config tables */
212 /* VL Lookup Table */
213 table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
214 table->entries = kcalloc(num_virtual_links,
215 table->ops->unpacked_entry_size,
219 table->entry_count = num_virtual_links;
220 vl_lookup = table->entries;
224 list_for_each_entry(rule, &priv->flow_block.rules, list) {
227 if (rule->type != SJA1105_RULE_VL)
230 for_each_set_bit(port, &rule->port_mask, SJA1105_NUM_PORTS) {
231 vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
232 vl_lookup[k].port = port;
233 vl_lookup[k].macaddr = rule->key.vl.dmac;
234 if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
235 vl_lookup[k].vlanid = rule->key.vl.vid;
236 vl_lookup[k].vlanprior = rule->key.vl.pcp;
238 u16 vid = dsa_8021q_rx_vid(priv->ds, port);
240 vl_lookup[k].vlanid = vid;
241 vl_lookup[k].vlanprior = 0;
243 /* For critical VLs, the DESTPORTS mask is taken from
244 * the VL Forwarding Table, so no point in putting it
245 * in the VL Lookup Table
247 if (rule->vl.type == SJA1105_VL_NONCRITICAL)
248 vl_lookup[k].destports = rule->vl.destports;
250 vl_lookup[k].iscritical = true;
251 vl_lookup[k].flow_cookie = rule->cookie;
256 /* UM10944.pdf chapter 4.2.3 VL Lookup table:
257 * "the entries in the VL Lookup table must be sorted in ascending
258 * order (i.e. the smallest value must be loaded first) according to
259 * the following sort order: MACADDR, VLANID, PORT, VLANPRIOR."
261 for (i = 0; i < num_virtual_links; i++) {
262 struct sja1105_vl_lookup_entry *a = &vl_lookup[i];
264 for (j = i + 1; j < num_virtual_links; j++) {
265 struct sja1105_vl_lookup_entry *b = &vl_lookup[j];
267 if (sja1105_vl_key_lower(b, a)) {
268 struct sja1105_vl_lookup_entry tmp = *a;
276 if (!have_critical_virtual_links)
279 /* VL Policing Table */
280 table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
281 table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
285 table->entry_count = max_sharindx;
286 vl_policing = table->entries;
288 /* VL Forwarding Table */
289 table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
290 table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
294 table->entry_count = max_sharindx;
295 vl_fwd = table->entries;
297 /* VL Forwarding Parameters Table */
298 table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
299 table->entries = kcalloc(1, table->ops->unpacked_entry_size,
303 table->entry_count = 1;
305 for (i = 0; i < num_virtual_links; i++) {
306 unsigned long cookie = vl_lookup[i].flow_cookie;
307 struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
309 if (rule->vl.type == SJA1105_VL_NONCRITICAL)
311 if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
312 int sharindx = rule->vl.sharindx;
314 vl_policing[i].type = 1;
315 vl_policing[i].sharindx = sharindx;
316 vl_policing[i].maxlen = rule->vl.maxlen;
317 vl_policing[sharindx].type = 1;
320 vl_fwd[sharindx].type = 1;
321 vl_fwd[sharindx].priority = rule->vl.ipv;
322 vl_fwd[sharindx].partition = 0;
323 vl_fwd[sharindx].destports = rule->vl.destports;
327 sja1105_frame_memory_partitioning(priv);
332 int sja1105_vl_redirect(struct sja1105_private *priv, int port,
333 struct netlink_ext_ack *extack, unsigned long cookie,
334 struct sja1105_key *key, unsigned long destports,
337 struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
340 if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
341 key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
342 NL_SET_ERR_MSG_MOD(extack,
343 "Can only redirect based on DMAC");
345 } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
346 NL_SET_ERR_MSG_MOD(extack,
347 "Can only redirect based on {DMAC, VID, PCP}");
352 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
356 rule->cookie = cookie;
357 rule->type = SJA1105_RULE_VL;
359 list_add(&rule->list, &priv->flow_block.rules);
362 rule->port_mask |= BIT(port);
364 rule->vl.destports |= destports;
366 rule->vl.destports = destports;
368 rc = sja1105_init_virtual_links(priv, extack);
370 rule->port_mask &= ~BIT(port);
371 if (!rule->port_mask) {
372 list_del(&rule->list);
380 int sja1105_vl_delete(struct sja1105_private *priv, int port,
381 struct sja1105_rule *rule, struct netlink_ext_ack *extack)
385 rule->port_mask &= ~BIT(port);
386 if (!rule->port_mask) {
387 list_del(&rule->list);
391 rc = sja1105_init_virtual_links(priv, extack);
395 return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
398 /* Insert into the global gate list, sorted by gate action time. */
399 static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
400 struct sja1105_rule *rule,
401 u8 gate_state, s64 entry_time,
402 struct netlink_ext_ack *extack)
404 struct sja1105_gate_entry *e;
407 e = kzalloc(sizeof(*e), GFP_KERNEL);
412 e->gate_state = gate_state;
413 e->interval = entry_time;
415 if (list_empty(&gating_cfg->entries)) {
416 list_add(&e->list, &gating_cfg->entries);
418 struct sja1105_gate_entry *p;
420 list_for_each_entry(p, &gating_cfg->entries, list) {
421 if (p->interval == e->interval) {
422 NL_SET_ERR_MSG_MOD(extack,
428 if (e->interval < p->interval)
431 list_add(&e->list, p->list.prev);
434 gating_cfg->num_entries++;
442 /* The gate entries contain absolute times in their e->interval field. Convert
443 * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
446 sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
449 struct sja1105_gate_entry *last_e;
450 struct sja1105_gate_entry *e;
451 struct list_head *prev;
453 list_for_each_entry(e, &gating_cfg->entries, list) {
454 struct sja1105_gate_entry *p;
458 if (prev == &gating_cfg->entries)
461 p = list_entry(prev, struct sja1105_gate_entry, list);
462 p->interval = e->interval - p->interval;
464 last_e = list_last_entry(&gating_cfg->entries,
465 struct sja1105_gate_entry, list);
466 if (last_e->list.prev != &gating_cfg->entries)
467 last_e->interval = cycle_time - last_e->interval;
470 static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
472 struct sja1105_gate_entry *e, *n;
474 list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
480 static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
481 struct netlink_ext_ack *extack)
483 struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
484 struct sja1105_rule *rule;
485 s64 max_cycle_time = 0;
486 s64 its_base_time = 0;
489 list_for_each_entry(rule, &priv->flow_block.rules, list) {
490 if (rule->type != SJA1105_RULE_VL)
492 if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
495 if (max_cycle_time < rule->vl.cycle_time) {
496 max_cycle_time = rule->vl.cycle_time;
497 its_base_time = rule->vl.base_time;
504 dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
505 max_cycle_time, its_base_time);
507 sja1105_free_gating_config(gating_cfg);
509 gating_cfg->base_time = its_base_time;
510 gating_cfg->cycle_time = max_cycle_time;
511 gating_cfg->num_entries = 0;
513 list_for_each_entry(rule, &priv->flow_block.rules, list) {
517 if (rule->type != SJA1105_RULE_VL)
519 if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
522 /* Calculate the difference between this gating schedule's
523 * base time, and the base time of the gating schedule with the
524 * longest cycle time. We call it the relative base time (rbt).
526 rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
528 rbt -= its_base_time;
532 for (i = 0; i < rule->vl.num_entries; i++) {
533 u8 gate_state = rule->vl.entries[i].gate_state;
534 s64 entry_time = time;
536 while (entry_time < max_cycle_time) {
537 rc = sja1105_insert_gate_entry(gating_cfg, rule,
544 entry_time += rule->vl.cycle_time;
546 time += rule->vl.entries[i].interval;
550 sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
554 sja1105_free_gating_config(gating_cfg);
558 int sja1105_vl_gate(struct sja1105_private *priv, int port,
559 struct netlink_ext_ack *extack, unsigned long cookie,
560 struct sja1105_key *key, u32 index, s32 prio,
561 u64 base_time, u64 cycle_time, u64 cycle_time_ext,
562 u32 num_entries, struct action_gate_entry *entries)
564 struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
569 if (cycle_time_ext) {
570 NL_SET_ERR_MSG_MOD(extack,
571 "Cycle time extension not supported");
575 div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
577 NL_SET_ERR_MSG_MOD(extack,
578 "Base time must be multiple of 200 ns");
582 div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
584 NL_SET_ERR_MSG_MOD(extack,
585 "Cycle time must be multiple of 200 ns");
589 if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
590 key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
591 dev_err(priv->ds->dev, "1: vlan state %d key type %d\n",
592 priv->vlan_state, key->type);
593 NL_SET_ERR_MSG_MOD(extack,
594 "Can only gate based on DMAC");
596 } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
597 dev_err(priv->ds->dev, "2: vlan state %d key type %d\n",
598 priv->vlan_state, key->type);
599 NL_SET_ERR_MSG_MOD(extack,
600 "Can only gate based on {DMAC, VID, PCP}");
605 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
609 list_add(&rule->list, &priv->flow_block.rules);
610 rule->cookie = cookie;
611 rule->type = SJA1105_RULE_VL;
613 rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
614 rule->vl.sharindx = index;
615 rule->vl.base_time = base_time;
616 rule->vl.cycle_time = cycle_time;
617 rule->vl.num_entries = num_entries;
618 rule->vl.entries = kcalloc(num_entries,
619 sizeof(struct action_gate_entry),
621 if (!rule->vl.entries) {
626 for (i = 0; i < num_entries; i++) {
627 div_s64_rem(entries[i].interval,
628 sja1105_delta_to_ns(1), &rem);
630 NL_SET_ERR_MSG_MOD(extack,
631 "Interval must be multiple of 200 ns");
636 if (!entries[i].interval) {
637 NL_SET_ERR_MSG_MOD(extack,
638 "Interval cannot be zero");
643 if (ns_to_sja1105_delta(entries[i].interval) >
644 SJA1105_TAS_MAX_DELTA) {
645 NL_SET_ERR_MSG_MOD(extack,
646 "Maximum interval is 52 ms");
651 if (entries[i].maxoctets != -1) {
652 NL_SET_ERR_MSG_MOD(extack,
653 "Cannot offload IntervalOctetMax");
659 ipv = entries[i].ipv;
660 } else if (ipv != entries[i].ipv) {
661 NL_SET_ERR_MSG_MOD(extack,
662 "Only support a single IPV per VL");
667 rule->vl.entries[i] = entries[i];
671 if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
677 /* TODO: support per-flow MTU */
678 rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
682 rule->port_mask |= BIT(port);
684 rc = sja1105_compose_gating_subschedule(priv, extack);
688 rc = sja1105_init_virtual_links(priv, extack);
692 if (sja1105_gating_check_conflicts(priv, -1, extack)) {
693 NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
700 rule->port_mask &= ~BIT(port);
701 if (!rule->port_mask) {
702 list_del(&rule->list);
703 kfree(rule->vl.entries);
711 static int sja1105_find_vlid(struct sja1105_private *priv, int port,
712 struct sja1105_key *key)
714 struct sja1105_vl_lookup_entry *vl_lookup;
715 struct sja1105_table *table;
718 if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
719 key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
722 table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
723 vl_lookup = table->entries;
725 for (i = 0; i < table->entry_count; i++) {
726 if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
727 if (vl_lookup[i].port == port &&
728 vl_lookup[i].macaddr == key->vl.dmac &&
729 vl_lookup[i].vlanid == key->vl.vid &&
730 vl_lookup[i].vlanprior == key->vl.pcp)
733 if (vl_lookup[i].port == port &&
734 vl_lookup[i].macaddr == key->vl.dmac)
742 int sja1105_vl_stats(struct sja1105_private *priv, int port,
743 struct sja1105_rule *rule, struct flow_stats *stats,
744 struct netlink_ext_ack *extack)
746 const struct sja1105_regs *regs = priv->info->regs;
747 u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
754 if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
757 vlid = sja1105_find_vlid(priv, port, &rule->key);
761 rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
762 SJA1105_SIZE_VL_STATUS);
764 NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
768 sja1105_unpack(buf, &timingerr, 31, 16, SJA1105_SIZE_VL_STATUS);
769 sja1105_unpack(buf, &unreleased, 15, 0, SJA1105_SIZE_VL_STATUS);
770 sja1105_unpack(buf, &lengtherr, 47, 32, SJA1105_SIZE_VL_STATUS);
772 pkts = timingerr + unreleased + lengtherr;
774 flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts,
775 jiffies - rule->vl.stats.lastused,
776 FLOW_ACTION_HW_STATS_IMMEDIATE);
778 rule->vl.stats.pkts = pkts;
779 rule->vl.stats.lastused = jiffies;