Merge tag 'ecryptfs-5.13-rc1-updates' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-microblaze.git] / drivers / net / dsa / sja1105 / sja1105_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/delay.h>
9 #include <linux/module.h>
10 #include <linux/printk.h>
11 #include <linux/spi/spi.h>
12 #include <linux/errno.h>
13 #include <linux/gpio/consumer.h>
14 #include <linux/phylink.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <linux/of_mdio.h>
18 #include <linux/of_device.h>
19 #include <linux/netdev_features.h>
20 #include <linux/netdevice.h>
21 #include <linux/if_bridge.h>
22 #include <linux/if_ether.h>
23 #include <linux/dsa/8021q.h>
24 #include "sja1105.h"
25 #include "sja1105_sgmii.h"
26 #include "sja1105_tas.h"
27
28 #define SJA1105_UNKNOWN_MULTICAST       0x010000000000ull
29
30 static const struct dsa_switch_ops sja1105_switch_ops;
31
/* Pulse the RST_N line of the switch and wait for it to come back up. */
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	/* Assert reset for at least the minimum pulse width */
	gpiod_set_value_cansleep(gpio, 1);
	msleep(pulse_len);

	/* Deassert reset and give the chip time to finish starting up */
	gpiod_set_value_cansleep(gpio, 0);
	msleep(startup_delay);
}
42
43 static void
44 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
45                            int from, int to, bool allow)
46 {
47         if (allow)
48                 l2_fwd[from].reach_port |= BIT(to);
49         else
50                 l2_fwd[from].reach_port &= ~BIT(to);
51 }
52
53 static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
54                                 int from, int to)
55 {
56         return !!(l2_fwd[from].reach_port & BIT(to));
57 }
58
/* Structure used to temporarily transport device tree
 * settings into sja1105_setup
 */
struct sja1105_dt_port {
	/* Port's "phy-mode" DT property (MII, RMII, RGMII variants, SGMII) */
	phy_interface_t phy_mode;
	/* Which end of the xMII link this port acts as (MAC or PHY role) */
	sja1105_mii_role_t role;
};
66
/* Build the static MAC Configuration Table: one entry per port, all set to
 * the same conservative defaults (I/O and address learning disabled - the
 * STP state machine enables them later). The upstream/CPU port is the
 * exception: STP never runs on it, so it is enabled statically here.
 *
 * Returns 0 on success, -ENOMEM if the table memory cannot be allocated.
 */
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on egress.
		 * Every queue i holds top[i] - base[i] frames.
		 * Sum of top[i] - base[i] is 511 (max hardware limit).
		 */
		.top  = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep standard IFG of 12 bytes on egress. */
		.ifg = 0,
		/* Always put the MAC speed in automatic mode, where it can be
		 * adjusted at runtime by PHYLINK.
		 */
		.speed = SJA1105_SPEED_AUTO,
		/* No static correction for 1-step 1588 events */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Disable aging for critical TTEthernet traffic */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) to apply to untagged ingress */
		.vlanprio = 0,
		.vlanid = 1,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop traffic with other EtherType than ETH_P_IP */
		.drpnona664 = false,
		/* Don't drop double-tagged traffic */
		.drpdtag = false,
		/* Don't drop untagged traffic */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard previous MAC Configuration Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_NUM_PORTS,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_NUM_PORTS;

	mac = table->entries;

	/* Stamp the defaults onto every port, then open up the upstream
	 * (CPU-facing) port, which has no STP instance to do it for us.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP doesn't get called for CPU port, so we need to
			 * set the I/O parameters statically.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}
143
144 static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
145 {
146         if (priv->info->part_no != SJA1105R_PART_NO &&
147             priv->info->part_no != SJA1105S_PART_NO)
148                 return false;
149
150         if (port != SJA1105_SGMII_PORT)
151                 return false;
152
153         if (dsa_is_unused_port(priv->ds, port))
154                 return false;
155
156         return true;
157 }
158
159 static int sja1105_init_mii_settings(struct sja1105_private *priv,
160                                      struct sja1105_dt_port *ports)
161 {
162         struct device *dev = &priv->spidev->dev;
163         struct sja1105_xmii_params_entry *mii;
164         struct sja1105_table *table;
165         int i;
166
167         table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
168
169         /* Discard previous xMII Mode Parameters Table */
170         if (table->entry_count) {
171                 kfree(table->entries);
172                 table->entry_count = 0;
173         }
174
175         table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
176                                  table->ops->unpacked_entry_size, GFP_KERNEL);
177         if (!table->entries)
178                 return -ENOMEM;
179
180         /* Override table based on PHYLINK DT bindings */
181         table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
182
183         mii = table->entries;
184
185         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
186                 if (dsa_is_unused_port(priv->ds, i))
187                         continue;
188
189                 switch (ports[i].phy_mode) {
190                 case PHY_INTERFACE_MODE_MII:
191                         mii->xmii_mode[i] = XMII_MODE_MII;
192                         break;
193                 case PHY_INTERFACE_MODE_RMII:
194                         mii->xmii_mode[i] = XMII_MODE_RMII;
195                         break;
196                 case PHY_INTERFACE_MODE_RGMII:
197                 case PHY_INTERFACE_MODE_RGMII_ID:
198                 case PHY_INTERFACE_MODE_RGMII_RXID:
199                 case PHY_INTERFACE_MODE_RGMII_TXID:
200                         mii->xmii_mode[i] = XMII_MODE_RGMII;
201                         break;
202                 case PHY_INTERFACE_MODE_SGMII:
203                         if (!sja1105_supports_sgmii(priv, i))
204                                 return -EINVAL;
205                         mii->xmii_mode[i] = XMII_MODE_SGMII;
206                         break;
207                 default:
208                         dev_err(dev, "Unsupported PHY mode %s!\n",
209                                 phy_modes(ports[i].phy_mode));
210                 }
211
212                 /* Even though the SerDes port is able to drive SGMII autoneg
213                  * like a PHY would, from the perspective of the XMII tables,
214                  * the SGMII port should always be put in MAC mode.
215                  */
216                 if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
217                         mii->phy_mac[i] = XMII_MAC;
218                 else
219                         mii->phy_mac[i] = ports[i].role;
220         }
221         return 0;
222 }
223
224 static int sja1105_init_static_fdb(struct sja1105_private *priv)
225 {
226         struct sja1105_l2_lookup_entry *l2_lookup;
227         struct sja1105_table *table;
228         int port;
229
230         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
231
232         /* We only populate the FDB table through dynamic L2 Address Lookup
233          * entries, except for a special entry at the end which is a catch-all
234          * for unknown multicast and will be used to control flooding domain.
235          */
236         if (table->entry_count) {
237                 kfree(table->entries);
238                 table->entry_count = 0;
239         }
240
241         if (!priv->info->can_limit_mcast_flood)
242                 return 0;
243
244         table->entries = kcalloc(1, table->ops->unpacked_entry_size,
245                                  GFP_KERNEL);
246         if (!table->entries)
247                 return -ENOMEM;
248
249         table->entry_count = 1;
250         l2_lookup = table->entries;
251
252         /* All L2 multicast addresses have an odd first octet */
253         l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
254         l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
255         l2_lookup[0].lockeds = true;
256         l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;
257
258         /* Flood multicast to every port by default */
259         for (port = 0; port < priv->ds->num_ports; port++)
260                 if (!dsa_is_unused_port(priv->ds, port))
261                         l2_lookup[0].destports |= BIT(port);
262
263         return 0;
264 }
265
/* Install the single L2 Lookup Parameters entry, which governs address
 * learning policy: FDB aging time, bin sizing, hash polynomial, and shared
 * vs independent VLAN learning. Some fields apply only to the E/T or only
 * to the P/Q/R/S device families, as noted inline.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* And the P/Q/R/S equivalent setting: */
		.start_dynspc = 0,
		.maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
			     max_fdb_entries, max_fdb_entries, },
		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
		.poly = 0x97,
		/* This selects between Independent VLAN Learning (IVL) and
		 * Shared VLAN Learning (SVL)
		 */
		.shared_learn = true,
		/* Don't discard management traffic based on ENFPORT -
		 * we don't perform SMAC port enforcement anyway, so
		 * what we are setting here doesn't matter.
		 */
		.no_enf_hostprt = false,
		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
		 * Maybe correlate with no_linklocal_learn from bridge driver?
		 */
		.no_mgmt_learn = true,
		/* P/Q/R/S only */
		.use_static = true,
		/* Dynamically learned FDB entries can overwrite other (older)
		 * dynamic FDB entries
		 */
		.owr_dyn = true,
		.drpnolearn = true,
	};

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	/* Discard any previously built table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}
323
/* Seed the static VLAN Lookup Table with the single initial pvid entry
 * (VLAN 1). It also records a matching entry on the dsa_8021q VLAN list
 * for each used port, so VLAN-unaware traffic (e.g. STP, PTP) egresses
 * untagged.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_vlan_lookup_entry pvid = {
		.ving_mirr = 0,
		.vegr_mirr = 0,
		.vmemb_port = 0,
		.vlan_bc = 0,
		.tag_port = 0,
		.vlanid = 1,
	};
	struct dsa_switch *ds = priv->ds;
	int port;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	/* The static VLAN table will only contain the initial pvid of 1.
	 * All other VLANs are to be configured through dynamic entries,
	 * and kept in the static configuration table as backing memory.
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kzalloc(table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;

	/* VLAN 1: all DT-defined ports are members; no restrictions on
	 * forwarding; always transmit as untagged.
	 */
	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_bridge_vlan *v;

		if (dsa_is_unused_port(ds, port))
			continue;

		pvid.vmemb_port |= BIT(port);
		pvid.vlan_bc |= BIT(port);
		pvid.tag_port &= ~BIT(port);

		/* Let traffic that don't need dsa_8021q (e.g. STP, PTP) be
		 * transmitted as untagged.
		 */
		v = kzalloc(sizeof(*v), GFP_KERNEL);
		if (!v)
			/* NOTE(review): list entries already added to
			 * priv->dsa_8021q_vlans stay there on this error
			 * path - presumably freed at teardown; verify
			 * against the caller's cleanup.
			 */
			return -ENOMEM;

		v->port = port;
		v->vid = 1;
		v->untagged = true;
		/* The CPU port keeps VLAN 1 as its pvid */
		if (dsa_is_cpu_port(ds, port))
			v->pvid = true;
		list_add(&v->list, &priv->dsa_8021q_vlans);
	}

	/* Commit the accumulated membership masks to the (single) entry */
	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
	return 0;
}
387
/* Build the L2 Forwarding Table. The first SJA1105_NUM_PORTS entries hold
 * per-port forwarding and flooding rules: initially each user port may
 * only exchange traffic with the upstream (CPU) port and vice versa. The
 * following SJA1105_NUM_TC entries hold a one-to-one ingress-to-egress
 * VLAN PCP mapping.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2fwd;
	struct sja1105_table *table;
	int i, j;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];

	/* Discard any previously built table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;

	l2fwd = table->entries;

	/* First 5 entries define the forwarding rules */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		unsigned int upstream = dsa_upstream_port(priv->ds, i);

		/* Identity PCP mapping for this port */
		for (j = 0; j < SJA1105_NUM_TC; j++)
			l2fwd[i].vlan_pmap[j] = j;

		/* All ports start up with egress flooding enabled,
		 * including the CPU port.
		 */
		priv->ucast_egress_floods |= BIT(i);
		priv->bcast_egress_floods |= BIT(i);

		/* The upstream port needs no rule towards itself */
		if (i == upstream)
			continue;

		/* User port <-> CPU port, both directions */
		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
		sja1105_port_allow_traffic(l2fwd, upstream, i, true);

		l2fwd[i].bc_domain = BIT(upstream);
		l2fwd[i].fl_domain = BIT(upstream);

		l2fwd[upstream].bc_domain |= BIT(i);
		l2fwd[upstream].fl_domain |= BIT(i);
	}
	/* Next 8 entries define VLAN PCP mapping from ingress to egress.
	 * Create a one-to-one mapping.
	 */
	for (i = 0; i < SJA1105_NUM_TC; i++)
		for (j = 0; j < SJA1105_NUM_PORTS; j++)
			l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;

	return 0;
}
444
445 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
446 {
447         struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
448                 /* Disallow dynamic reconfiguration of vlan_pmap */
449                 .max_dynp = 0,
450                 /* Use a single memory partition for all ingress queues */
451                 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
452         };
453         struct sja1105_table *table;
454
455         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
456
457         if (table->entry_count) {
458                 kfree(table->entries);
459                 table->entry_count = 0;
460         }
461
462         table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
463                                  table->ops->unpacked_entry_size, GFP_KERNEL);
464         if (!table->entries)
465                 return -ENOMEM;
466
467         table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
468
469         /* This table only has a single entry */
470         ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
471                                 default_l2fwd_params;
472
473         return 0;
474 }
475
/* Recompute the split of the switch's frame buffer memory between
 * best-effort traffic, the VLAN retagging loopback (if active) and, when
 * TTEthernet virtual links exist, critical traffic. This only updates the
 * static config tables in memory; committing them to hardware is the
 * caller's responsibility.
 */
void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
	struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
	struct sja1105_table *table;
	int max_mem;

	/* VLAN retagging is implemented using a loopback port that consumes
	 * frame buffers. That leaves less for us.
	 */
	if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
		max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
	else
		max_mem = SJA1105_MAX_FRAME_MEMORY;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
	l2_fwd_params = table->entries;
	l2_fwd_params->part_spc[0] = max_mem;

	/* If we have any critical-traffic virtual links, we need to reserve
	 * some frame buffer memory for them. At the moment, hardcode the value
	 * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
	 * remaining for best-effort traffic. TODO: figure out a more flexible
	 * way to perform the frame buffer partitioning.
	 */
	if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
		return;

	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	vl_fwd_params = table->entries;

	/* Carve the VL reservation out of the best-effort partition */
	l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
	vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
}
510
/* Install the single General Parameters entry: host and cascade port
 * roles, link-local trap filters, and the TPIDs (deliberately set to a
 * non-standard EtherType so VLAN filtering is effectively off until
 * enabled by the bridge layer).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Allow dynamic changing of the mirror port */
		.mirr_ptacu = true,
		.switchid = priv->ds->index,
		/* Priority queue for link-local management frames
		 * (both ingress to and egress from CPU - PTP, STP etc)
		 */
		.hostprio = 7,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1    = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = false,
		.send_meta1  = false,
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0    = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = false,
		.send_meta0  = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port. Such traffic
		 * received on host_port itself would be dropped, except
		 * by installing a temporary 'management route'
		 */
		.host_port = dsa_upstream_port(priv->ds, 0),
		/* Default to an invalid value */
		.mirr_port = SJA1105_NUM_PORTS,
		/* Link-local traffic received on casc_port will be forwarded
		 * to host_port without embedding the source port and device ID
		 * info in the destination MAC address (presumably because it
		 * is a cascaded port and a downstream SJA switch already did
		 * that). Default to an invalid port (to disable the feature)
		 * and overwrite this if we find any DSA (cascaded) ports.
		 */
		.casc_port = SJA1105_NUM_PORTS,
		/* No TTEthernet */
		.vllupformat = SJA1105_VL_FORMAT_PSFP,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Forcefully disable VLAN filtering by telling
		 * the switch that VLAN has a different EtherType.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	/* Discard any previously built table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}
579
580 static int sja1105_init_avb_params(struct sja1105_private *priv)
581 {
582         struct sja1105_avb_params_entry *avb;
583         struct sja1105_table *table;
584
585         table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
586
587         /* Discard previous AVB Parameters Table */
588         if (table->entry_count) {
589                 kfree(table->entries);
590                 table->entry_count = 0;
591         }
592
593         table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
594                                  table->ops->unpacked_entry_size, GFP_KERNEL);
595         if (!table->entries)
596                 return -ENOMEM;
597
598         table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
599
600         avb = table->entries;
601
602         /* Configure the MAC addresses for meta frames */
603         avb->destmeta = SJA1105_META_DMAC;
604         avb->srcmeta  = SJA1105_META_SMAC;
605         /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
606          * default. This is because there might be boards with a hardware
607          * layout where enabling the pin as output might cause an electrical
608          * clash. On E/T the pin is always an output, which the board designers
609          * probably already knew, so even if there are going to be electrical
610          * issues, there's nothing we can do.
611          */
612         avb->cas_master = false;
613
614         return 0;
615 }
616
617 /* The L2 policing table is 2-stage. The table is looked up for each frame
618  * according to the ingress port, whether it was broadcast or not, and the
619  * classified traffic class (given by VLAN PCP). This portion of the lookup is
620  * fixed, and gives access to the SHARINDX, an indirection register pointing
621  * within the policing table itself, which is used to resolve the policer that
622  * will be used for this frame.
623  *
624  *  Stage 1                              Stage 2
625  * +------------+--------+              +---------------------------------+
626  * |Port 0 TC 0 |SHARINDX|              | Policer 0: Rate, Burst, MTU     |
627  * +------------+--------+              +---------------------------------+
628  * |Port 0 TC 1 |SHARINDX|              | Policer 1: Rate, Burst, MTU     |
629  * +------------+--------+              +---------------------------------+
630  *    ...                               | Policer 2: Rate, Burst, MTU     |
631  * +------------+--------+              +---------------------------------+
632  * |Port 0 TC 7 |SHARINDX|              | Policer 3: Rate, Burst, MTU     |
633  * +------------+--------+              +---------------------------------+
634  * |Port 1 TC 0 |SHARINDX|              | Policer 4: Rate, Burst, MTU     |
635  * +------------+--------+              +---------------------------------+
636  *    ...                               | Policer 5: Rate, Burst, MTU     |
637  * +------------+--------+              +---------------------------------+
638  * |Port 1 TC 7 |SHARINDX|              | Policer 6: Rate, Burst, MTU     |
639  * +------------+--------+              +---------------------------------+
640  *    ...                               | Policer 7: Rate, Burst, MTU     |
641  * +------------+--------+              +---------------------------------+
642  * |Port 4 TC 7 |SHARINDX|                 ...
643  * +------------+--------+
644  * |Port 0 BCAST|SHARINDX|                 ...
645  * +------------+--------+
646  * |Port 1 BCAST|SHARINDX|                 ...
647  * +------------+--------+
648  *    ...                                  ...
649  * +------------+--------+              +---------------------------------+
650  * |Port 4 BCAST|SHARINDX|              | Policer 44: Rate, Burst, MTU    |
651  * +------------+--------+              +---------------------------------+
652  *
 * In this driver, we shall use policers 0-4 as statically allocated port
654  * (matchall) policers. So we need to make the SHARINDX for all lookups
655  * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
656  * lookup) equal.
657  * The remaining policers (40) shall be dynamically allocated for flower
658  * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
659  */
660 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
661
662 static int sja1105_init_l2_policing(struct sja1105_private *priv)
663 {
664         struct sja1105_l2_policing_entry *policing;
665         struct sja1105_table *table;
666         int port, tc;
667
668         table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
669
670         /* Discard previous L2 Policing Table */
671         if (table->entry_count) {
672                 kfree(table->entries);
673                 table->entry_count = 0;
674         }
675
676         table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
677                                  table->ops->unpacked_entry_size, GFP_KERNEL);
678         if (!table->entries)
679                 return -ENOMEM;
680
681         table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
682
683         policing = table->entries;
684
685         /* Setup shared indices for the matchall policers */
686         for (port = 0; port < SJA1105_NUM_PORTS; port++) {
687                 int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
688
689                 for (tc = 0; tc < SJA1105_NUM_TC; tc++)
690                         policing[port * SJA1105_NUM_TC + tc].sharindx = port;
691
692                 policing[bcast].sharindx = port;
693         }
694
695         /* Setup the matchall policer parameters */
696         for (port = 0; port < SJA1105_NUM_PORTS; port++) {
697                 int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
698
699                 if (dsa_is_cpu_port(priv->ds, port))
700                         mtu += VLAN_HLEN;
701
702                 policing[port].smax = 65535; /* Burst size in bytes */
703                 policing[port].rate = SJA1105_RATE_MBPS(1000);
704                 policing[port].maxlen = mtu;
705                 policing[port].partition = 0;
706         }
707
708         return 0;
709 }
710
711 static int sja1105_static_config_load(struct sja1105_private *priv,
712                                       struct sja1105_dt_port *ports)
713 {
714         int rc;
715
716         sja1105_static_config_free(&priv->static_config);
717         rc = sja1105_static_config_init(&priv->static_config,
718                                         priv->info->static_ops,
719                                         priv->info->device_id);
720         if (rc)
721                 return rc;
722
723         /* Build static configuration */
724         rc = sja1105_init_mac_settings(priv);
725         if (rc < 0)
726                 return rc;
727         rc = sja1105_init_mii_settings(priv, ports);
728         if (rc < 0)
729                 return rc;
730         rc = sja1105_init_static_fdb(priv);
731         if (rc < 0)
732                 return rc;
733         rc = sja1105_init_static_vlan(priv);
734         if (rc < 0)
735                 return rc;
736         rc = sja1105_init_l2_lookup_params(priv);
737         if (rc < 0)
738                 return rc;
739         rc = sja1105_init_l2_forwarding(priv);
740         if (rc < 0)
741                 return rc;
742         rc = sja1105_init_l2_forwarding_params(priv);
743         if (rc < 0)
744                 return rc;
745         rc = sja1105_init_l2_policing(priv);
746         if (rc < 0)
747                 return rc;
748         rc = sja1105_init_general_params(priv);
749         if (rc < 0)
750                 return rc;
751         rc = sja1105_init_avb_params(priv);
752         if (rc < 0)
753                 return rc;
754
755         /* Send initial configuration to hardware via SPI */
756         return sja1105_static_config_upload(priv);
757 }
758
759 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
760                                       const struct sja1105_dt_port *ports)
761 {
762         int i;
763
764         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
765                 if (ports[i].role == XMII_MAC)
766                         continue;
767
768                 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
769                     ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
770                         priv->rgmii_rx_delay[i] = true;
771
772                 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
773                     ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
774                         priv->rgmii_tx_delay[i] = true;
775
776                 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
777                      !priv->info->setup_rgmii_delay)
778                         return -EINVAL;
779         }
780         return 0;
781 }
782
783 static int sja1105_parse_ports_node(struct sja1105_private *priv,
784                                     struct sja1105_dt_port *ports,
785                                     struct device_node *ports_node)
786 {
787         struct device *dev = &priv->spidev->dev;
788         struct device_node *child;
789
790         for_each_available_child_of_node(ports_node, child) {
791                 struct device_node *phy_node;
792                 phy_interface_t phy_mode;
793                 u32 index;
794                 int err;
795
796                 /* Get switch port number from DT */
797                 if (of_property_read_u32(child, "reg", &index) < 0) {
798                         dev_err(dev, "Port number not defined in device tree "
799                                 "(property \"reg\")\n");
800                         of_node_put(child);
801                         return -ENODEV;
802                 }
803
804                 /* Get PHY mode from DT */
805                 err = of_get_phy_mode(child, &phy_mode);
806                 if (err) {
807                         dev_err(dev, "Failed to read phy-mode or "
808                                 "phy-interface-type property for port %d\n",
809                                 index);
810                         of_node_put(child);
811                         return -ENODEV;
812                 }
813                 ports[index].phy_mode = phy_mode;
814
815                 phy_node = of_parse_phandle(child, "phy-handle", 0);
816                 if (!phy_node) {
817                         if (!of_phy_is_fixed_link(child)) {
818                                 dev_err(dev, "phy-handle or fixed-link "
819                                         "properties missing!\n");
820                                 of_node_put(child);
821                                 return -ENODEV;
822                         }
823                         /* phy-handle is missing, but fixed-link isn't.
824                          * So it's a fixed link. Default to PHY role.
825                          */
826                         ports[index].role = XMII_PHY;
827                 } else {
828                         /* phy-handle present => put port in MAC role */
829                         ports[index].role = XMII_MAC;
830                         of_node_put(phy_node);
831                 }
832
833                 /* The MAC/PHY role can be overridden with explicit bindings */
834                 if (of_property_read_bool(child, "sja1105,role-mac"))
835                         ports[index].role = XMII_MAC;
836                 else if (of_property_read_bool(child, "sja1105,role-phy"))
837                         ports[index].role = XMII_PHY;
838         }
839
840         return 0;
841 }
842
843 static int sja1105_parse_dt(struct sja1105_private *priv,
844                             struct sja1105_dt_port *ports)
845 {
846         struct device *dev = &priv->spidev->dev;
847         struct device_node *switch_node = dev->of_node;
848         struct device_node *ports_node;
849         int rc;
850
851         ports_node = of_get_child_by_name(switch_node, "ports");
852         if (!ports_node) {
853                 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
854                 return -ENODEV;
855         }
856
857         rc = sja1105_parse_ports_node(priv, ports, ports_node);
858         of_node_put(ports_node);
859
860         return rc;
861 }
862
863 static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
864 {
865         const struct sja1105_regs *regs = priv->info->regs;
866         u32 val;
867         int rc;
868
869         rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
870                               NULL);
871         if (rc < 0)
872                 return rc;
873
874         return val;
875 }
876
877 static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
878                                u16 pcs_val)
879 {
880         const struct sja1105_regs *regs = priv->info->regs;
881         u32 val = pcs_val;
882         int rc;
883
884         rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
885                               NULL);
886         if (rc < 0)
887                 return rc;
888
889         return val;
890 }
891
/* Bring the SGMII PCS into a known configuration.
 *
 * @an_enabled: kick off SGMII in-band autoneg immediately. When false, the
 *              link only becomes operational after a later call to
 *              sja1105_sgmii_pcs_force_speed().
 * @an_master:  put the PCS in PHY mode (presumably the master side of the
 *              SGMII link - confirm against the SJA1105 datasheet).
 *
 * NOTE(review): the sja1105_sgmii_write() return codes are not checked, so
 * SPI errors in this register sequence go unnoticed.
 */
static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
				     bool an_enabled, bool an_master)
{
	u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;

	/* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
	 * stop the clock during LPI mode, make the MAC reconfigure
	 * autonomously after PCS autoneg is done, flush the internal FIFOs.
	 */
	sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
					       SJA1105_DC1_CLOCK_STOP_EN |
					       SJA1105_DC1_MAC_AUTO_SW |
					       SJA1105_DC1_INIT);
	/* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
	sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
	/* AUTONEG_CONTROL: Use SGMII autoneg */
	if (an_master)
		ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
	sja1105_sgmii_write(priv, SJA1105_AC, ac);
	/* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
	 * sja1105_sgmii_pcs_force_speed must be called later for the link
	 * to become operational.
	 */
	if (an_enabled)
		sja1105_sgmii_write(priv, MII_BMCR,
				    BMCR_ANENABLE | BMCR_ANRESTART);
}
919
920 static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
921                                           int speed)
922 {
923         int pcs_speed;
924
925         switch (speed) {
926         case SPEED_1000:
927                 pcs_speed = BMCR_SPEED1000;
928                 break;
929         case SPEED_100:
930                 pcs_speed = BMCR_SPEED100;
931                 break;
932         case SPEED_10:
933                 pcs_speed = BMCR_SPEED10;
934                 break;
935         default:
936                 dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
937                 return;
938         }
939         sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
940 }
941
/* Convert link speed from SJA1105 to ethtool encoding.
 * Indexed by the sja1105_speed_t values stored in the MAC config table.
 */
static int sja1105_speed[] = {
	[SJA1105_SPEED_AUTO]		= SPEED_UNKNOWN,
	[SJA1105_SPEED_10MBPS]		= SPEED_10,
	[SJA1105_SPEED_100MBPS]		= SPEED_100,
	[SJA1105_SPEED_1000MBPS]	= SPEED_1000,
};
949
/* Set link speed in the MAC configuration for a specific port.
 *
 * @speed_mbps: an ethtool SPEED_* value. SPEED_UNKNOWN programs
 * SJA1105_SPEED_AUTO, which per UM10944.pdf disables the port until
 * phylink comes back with a resolved speed.
 *
 * Returns 0 on success, -EINVAL for an unsupported speed, or a negative
 * error code from the dynamic reconfiguration interface.
 */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps)
{
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like.  For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	switch (speed_mbps) {
	case SPEED_UNKNOWN:
		/* PHYLINK called sja1105_mac_config() to inform us about
		 * the state->interface, but AN has not completed and the
		 * speed is not yet valid. UM10944.pdf says that setting
		 * SJA1105_SPEED_AUTO at runtime disables the port, so that is
		 * ok for power consumption in case AN will never complete -
		 * otherwise PHYLINK should come back with a new update.
		 */
		speed = SJA1105_SPEED_AUTO;
		break;
	case SPEED_10:
		speed = SJA1105_SPEED_10MBPS;
		break;
	case SPEED_100:
		speed = SJA1105_SPEED_100MBPS;
		break;
	case SPEED_1000:
		speed = SJA1105_SPEED_1000MBPS;
		break;
	default:
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
	 * table, since this will be used for the clocking setup, and we no
	 * longer need to store it in the static config (already told hardware
	 * we want auto during upload phase).
	 * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
	 * we need to configure the PCS only (if even that).
	 */
	if (sja1105_supports_sgmii(priv, port))
		mac[port].speed = SJA1105_SPEED_1000MBPS;
	else
		mac[port].speed = speed;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					  &mac[port], true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}
1027
1028 /* The SJA1105 MAC programming model is through the static config (the xMII
1029  * Mode table cannot be dynamically reconfigured), and we have to program
1030  * that early (earlier than PHYLINK calls us, anyway).
1031  * So just error out in case the connected PHY attempts to change the initial
1032  * system interface MII protocol from what is defined in the DT, at least for
1033  * now.
1034  */
1035 static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
1036                                       phy_interface_t interface)
1037 {
1038         struct sja1105_xmii_params_entry *mii;
1039         sja1105_phy_interface_t phy_mode;
1040
1041         mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
1042         phy_mode = mii->xmii_mode[port];
1043
1044         switch (interface) {
1045         case PHY_INTERFACE_MODE_MII:
1046                 return (phy_mode != XMII_MODE_MII);
1047         case PHY_INTERFACE_MODE_RMII:
1048                 return (phy_mode != XMII_MODE_RMII);
1049         case PHY_INTERFACE_MODE_RGMII:
1050         case PHY_INTERFACE_MODE_RGMII_ID:
1051         case PHY_INTERFACE_MODE_RGMII_RXID:
1052         case PHY_INTERFACE_MODE_RGMII_TXID:
1053                 return (phy_mode != XMII_MODE_RGMII);
1054         case PHY_INTERFACE_MODE_SGMII:
1055                 return (phy_mode != XMII_MODE_SGMII);
1056         default:
1057                 return true;
1058         }
1059 }
1060
1061 static void sja1105_mac_config(struct dsa_switch *ds, int port,
1062                                unsigned int mode,
1063                                const struct phylink_link_state *state)
1064 {
1065         struct sja1105_private *priv = ds->priv;
1066         bool is_sgmii = sja1105_supports_sgmii(priv, port);
1067
1068         if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
1069                 dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
1070                         phy_modes(state->interface));
1071                 return;
1072         }
1073
1074         if (phylink_autoneg_inband(mode) && !is_sgmii) {
1075                 dev_err(ds->dev, "In-band AN not supported!\n");
1076                 return;
1077         }
1078
1079         if (is_sgmii)
1080                 sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
1081                                          false);
1082 }
1083
/* phylink mac_link_down callback: inhibit transmission on this port while
 * the link is down. Transmission is re-enabled by sja1105_mac_link_up().
 */
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
				  unsigned int mode,
				  phy_interface_t interface)
{
	sja1105_inhibit_tx(ds->priv, BIT(port), true);
}
1090
/* phylink mac_link_up callback: commit the resolved link parameters to
 * hardware and let traffic flow again.
 *
 * Note: the return value of sja1105_adjust_port_config() is ignored here;
 * this phylink callback offers no way to report an error.
 */
static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
				unsigned int mode,
				phy_interface_t interface,
				struct phy_device *phydev,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct sja1105_private *priv = ds->priv;

	/* Program the resolved speed into the MAC configuration tables */
	sja1105_adjust_port_config(priv, port, speed);

	/* Without in-band AN, the SGMII PCS must be told the speed by hand */
	if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
		sja1105_sgmii_pcs_force_speed(priv, speed);

	/* Link is up: stop inhibiting transmission on this port */
	sja1105_inhibit_tx(priv, BIT(port), false);
}
1107
1108 static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
1109                                      unsigned long *supported,
1110                                      struct phylink_link_state *state)
1111 {
1112         /* Construct a new mask which exhaustively contains all link features
1113          * supported by the MAC, and then apply that (logical AND) to what will
1114          * be sent to the PHY for "marketing".
1115          */
1116         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1117         struct sja1105_private *priv = ds->priv;
1118         struct sja1105_xmii_params_entry *mii;
1119
1120         mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
1121
1122         /* include/linux/phylink.h says:
1123          *     When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
1124          *     expects the MAC driver to return all supported link modes.
1125          */
1126         if (state->interface != PHY_INTERFACE_MODE_NA &&
1127             sja1105_phy_mode_mismatch(priv, port, state->interface)) {
1128                 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1129                 return;
1130         }
1131
1132         /* The MAC does not support pause frames, and also doesn't
1133          * support half-duplex traffic modes.
1134          */
1135         phylink_set(mask, Autoneg);
1136         phylink_set(mask, MII);
1137         phylink_set(mask, 10baseT_Full);
1138         phylink_set(mask, 100baseT_Full);
1139         phylink_set(mask, 100baseT1_Full);
1140         if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
1141             mii->xmii_mode[port] == XMII_MODE_SGMII)
1142                 phylink_set(mask, 1000baseT_Full);
1143
1144         bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
1145         bitmap_and(state->advertising, state->advertising, mask,
1146                    __ETHTOOL_LINK_MODE_MASK_NBITS);
1147 }
1148
1149 static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
1150                                      struct phylink_link_state *state)
1151 {
1152         struct sja1105_private *priv = ds->priv;
1153         int ais;
1154
1155         /* Read the vendor-specific AUTONEG_INTR_STATUS register */
1156         ais = sja1105_sgmii_read(priv, SJA1105_AIS);
1157         if (ais < 0)
1158                 return ais;
1159
1160         switch (SJA1105_AIS_SPEED(ais)) {
1161         case 0:
1162                 state->speed = SPEED_10;
1163                 break;
1164         case 1:
1165                 state->speed = SPEED_100;
1166                 break;
1167         case 2:
1168                 state->speed = SPEED_1000;
1169                 break;
1170         default:
1171                 dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
1172                         SJA1105_AIS_SPEED(ais));
1173         }
1174         state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
1175         state->an_complete = SJA1105_AIS_COMPLETE(ais);
1176         state->link = SJA1105_AIS_LINK_STATUS(ais);
1177
1178         return 0;
1179 }
1180
1181 static int
1182 sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
1183                               const struct sja1105_l2_lookup_entry *requested)
1184 {
1185         struct sja1105_l2_lookup_entry *l2_lookup;
1186         struct sja1105_table *table;
1187         int i;
1188
1189         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1190         l2_lookup = table->entries;
1191
1192         for (i = 0; i < table->entry_count; i++)
1193                 if (l2_lookup[i].macaddr == requested->macaddr &&
1194                     l2_lookup[i].vlanid == requested->vlanid &&
1195                     l2_lookup[i].destports & BIT(port))
1196                         return i;
1197
1198         return -1;
1199 }
1200
1201 /* We want FDB entries added statically through the bridge command to persist
1202  * across switch resets, which are a common thing during normal SJA1105
1203  * operation. So we have to back them up in the static configuration tables
1204  * and hence apply them on next static config upload... yay!
1205  */
1206 static int
1207 sja1105_static_fdb_change(struct sja1105_private *priv, int port,
1208                           const struct sja1105_l2_lookup_entry *requested,
1209                           bool keep)
1210 {
1211         struct sja1105_l2_lookup_entry *l2_lookup;
1212         struct sja1105_table *table;
1213         int rc, match;
1214
1215         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1216
1217         match = sja1105_find_static_fdb_entry(priv, port, requested);
1218         if (match < 0) {
1219                 /* Can't delete a missing entry. */
1220                 if (!keep)
1221                         return 0;
1222
1223                 /* No match => new entry */
1224                 rc = sja1105_table_resize(table, table->entry_count + 1);
1225                 if (rc)
1226                         return rc;
1227
1228                 match = table->entry_count - 1;
1229         }
1230
1231         /* Assign pointer after the resize (it may be new memory) */
1232         l2_lookup = table->entries;
1233
1234         /* We have a match.
1235          * If the job was to add this FDB entry, it's already done (mostly
1236          * anyway, since the port forwarding mask may have changed, case in
1237          * which we update it).
1238          * Otherwise we have to delete it.
1239          */
1240         if (keep) {
1241                 l2_lookup[match] = *requested;
1242                 return 0;
1243         }
1244
1245         /* To remove, the strategy is to overwrite the element with
1246          * the last one, and then reduce the array size by 1
1247          */
1248         l2_lookup[match] = l2_lookup[table->entry_count - 1];
1249         return sja1105_table_resize(table, table->entry_count - 1);
1250 }
1251
1252 /* First-generation switches have a 4-way set associative TCAM that
1253  * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
1254  * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
1255  * For the placement of a newly learnt FDB entry, the switch selects the bin
1256  * based on a hash function, and the way within that bin incrementally.
1257  */
1258 static int sja1105et_fdb_index(int bin, int way)
1259 {
1260         return bin * SJA1105ET_FDB_BIN_SIZE + way;
1261 }
1262
1263 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
1264                                          const u8 *addr, u16 vid,
1265                                          struct sja1105_l2_lookup_entry *match,
1266                                          int *last_unused)
1267 {
1268         int way;
1269
1270         for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
1271                 struct sja1105_l2_lookup_entry l2_lookup = {0};
1272                 int index = sja1105et_fdb_index(bin, way);
1273
1274                 /* Skip unused entries, optionally marking them
1275                  * into the return value
1276                  */
1277                 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1278                                                 index, &l2_lookup)) {
1279                         if (last_unused)
1280                                 *last_unused = way;
1281                         continue;
1282                 }
1283
1284                 if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
1285                     l2_lookup.vlanid == vid) {
1286                         if (match)
1287                                 *match = l2_lookup;
1288                         return way;
1289                 }
1290         }
1291         /* Return an invalid entry index if not found */
1292         return -1;
1293 }
1294
1295 int sja1105et_fdb_add(struct dsa_switch *ds, int port,
1296                       const unsigned char *addr, u16 vid)
1297 {
1298         struct sja1105_l2_lookup_entry l2_lookup = {0};
1299         struct sja1105_private *priv = ds->priv;
1300         struct device *dev = ds->dev;
1301         int last_unused = -1;
1302         int bin, way, rc;
1303
1304         bin = sja1105et_fdb_hash(priv, addr, vid);
1305
1306         way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1307                                             &l2_lookup, &last_unused);
1308         if (way >= 0) {
1309                 /* We have an FDB entry. Is our port in the destination
1310                  * mask? If yes, we need to do nothing. If not, we need
1311                  * to rewrite the entry by adding this port to it.
1312                  */
1313                 if (l2_lookup.destports & BIT(port))
1314                         return 0;
1315                 l2_lookup.destports |= BIT(port);
1316         } else {
1317                 int index = sja1105et_fdb_index(bin, way);
1318
1319                 /* We don't have an FDB entry. We construct a new one and
1320                  * try to find a place for it within the FDB table.
1321                  */
1322                 l2_lookup.macaddr = ether_addr_to_u64(addr);
1323                 l2_lookup.destports = BIT(port);
1324                 l2_lookup.vlanid = vid;
1325
1326                 if (last_unused >= 0) {
1327                         way = last_unused;
1328                 } else {
1329                         /* Bin is full, need to evict somebody.
1330                          * Choose victim at random. If you get these messages
1331                          * often, you may need to consider changing the
1332                          * distribution function:
1333                          * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
1334                          */
1335                         get_random_bytes(&way, sizeof(u8));
1336                         way %= SJA1105ET_FDB_BIN_SIZE;
1337                         dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
1338                                  bin, addr, way);
1339                         /* Evict entry */
1340                         sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1341                                                      index, NULL, false);
1342                 }
1343         }
1344         l2_lookup.index = sja1105et_fdb_index(bin, way);
1345
1346         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1347                                           l2_lookup.index, &l2_lookup,
1348                                           true);
1349         if (rc < 0)
1350                 return rc;
1351
1352         return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1353 }
1354
1355 int sja1105et_fdb_del(struct dsa_switch *ds, int port,
1356                       const unsigned char *addr, u16 vid)
1357 {
1358         struct sja1105_l2_lookup_entry l2_lookup = {0};
1359         struct sja1105_private *priv = ds->priv;
1360         int index, bin, way, rc;
1361         bool keep;
1362
1363         bin = sja1105et_fdb_hash(priv, addr, vid);
1364         way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1365                                             &l2_lookup, NULL);
1366         if (way < 0)
1367                 return 0;
1368         index = sja1105et_fdb_index(bin, way);
1369
1370         /* We have an FDB entry. Is our port in the destination mask? If yes,
1371          * we need to remove it. If the resulting port mask becomes empty, we
1372          * need to completely evict the FDB entry.
1373          * Otherwise we just write it back.
1374          */
1375         l2_lookup.destports &= ~BIT(port);
1376
1377         if (l2_lookup.destports)
1378                 keep = true;
1379         else
1380                 keep = false;
1381
1382         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1383                                           index, &l2_lookup, keep);
1384         if (rc < 0)
1385                 return rc;
1386
1387         return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1388 }
1389
1390 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
1391                         const unsigned char *addr, u16 vid)
1392 {
1393         struct sja1105_l2_lookup_entry l2_lookup = {0};
1394         struct sja1105_private *priv = ds->priv;
1395         int rc, i;
1396
1397         /* Search for an existing entry in the FDB table */
1398         l2_lookup.macaddr = ether_addr_to_u64(addr);
1399         l2_lookup.vlanid = vid;
1400         l2_lookup.iotag = SJA1105_S_TAG;
1401         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1402         if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
1403                 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1404                 l2_lookup.mask_iotag = BIT(0);
1405         } else {
1406                 l2_lookup.mask_vlanid = 0;
1407                 l2_lookup.mask_iotag = 0;
1408         }
1409         l2_lookup.destports = BIT(port);
1410
1411         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1412                                          SJA1105_SEARCH, &l2_lookup);
1413         if (rc == 0) {
1414                 /* Found and this port is already in the entry's
1415                  * port mask => job done
1416                  */
1417                 if (l2_lookup.destports & BIT(port))
1418                         return 0;
1419                 /* l2_lookup.index is populated by the switch in case it
1420                  * found something.
1421                  */
1422                 l2_lookup.destports |= BIT(port);
1423                 goto skip_finding_an_index;
1424         }
1425
1426         /* Not found, so try to find an unused spot in the FDB.
1427          * This is slightly inefficient because the strategy is knock-knock at
1428          * every possible position from 0 to 1023.
1429          */
1430         for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1431                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1432                                                  i, NULL);
1433                 if (rc < 0)
1434                         break;
1435         }
1436         if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
1437                 dev_err(ds->dev, "FDB is full, cannot add entry.\n");
1438                 return -EINVAL;
1439         }
1440         l2_lookup.lockeds = true;
1441         l2_lookup.index = i;
1442
1443 skip_finding_an_index:
1444         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1445                                           l2_lookup.index, &l2_lookup,
1446                                           true);
1447         if (rc < 0)
1448                 return rc;
1449
1450         return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1451 }
1452
1453 int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
1454                         const unsigned char *addr, u16 vid)
1455 {
1456         struct sja1105_l2_lookup_entry l2_lookup = {0};
1457         struct sja1105_private *priv = ds->priv;
1458         bool keep;
1459         int rc;
1460
1461         l2_lookup.macaddr = ether_addr_to_u64(addr);
1462         l2_lookup.vlanid = vid;
1463         l2_lookup.iotag = SJA1105_S_TAG;
1464         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1465         if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
1466                 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1467                 l2_lookup.mask_iotag = BIT(0);
1468         } else {
1469                 l2_lookup.mask_vlanid = 0;
1470                 l2_lookup.mask_iotag = 0;
1471         }
1472         l2_lookup.destports = BIT(port);
1473
1474         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1475                                          SJA1105_SEARCH, &l2_lookup);
1476         if (rc < 0)
1477                 return 0;
1478
1479         l2_lookup.destports &= ~BIT(port);
1480
1481         /* Decide whether we remove just this port from the FDB entry,
1482          * or if we remove it completely.
1483          */
1484         if (l2_lookup.destports)
1485                 keep = true;
1486         else
1487                 keep = false;
1488
1489         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1490                                           l2_lookup.index, &l2_lookup, keep);
1491         if (rc < 0)
1492                 return rc;
1493
1494         return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1495 }
1496
1497 static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1498                            const unsigned char *addr, u16 vid)
1499 {
1500         struct sja1105_private *priv = ds->priv;
1501
1502         /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
1503          * so the switch still does some VLAN processing internally.
1504          * But Shared VLAN Learning (SVL) is also active, and it will take
1505          * care of autonomous forwarding between the unique pvid's of each
1506          * port.  Here we just make sure that users can't add duplicate FDB
1507          * entries when in this mode - the actual VID doesn't matter except
1508          * for what gets printed in 'bridge fdb show'.  In the case of zero,
1509          * no VID gets printed at all.
1510          */
1511         if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1512                 vid = 0;
1513
1514         return priv->info->fdb_add_cmd(ds, port, addr, vid);
1515 }
1516
1517 static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1518                            const unsigned char *addr, u16 vid)
1519 {
1520         struct sja1105_private *priv = ds->priv;
1521
1522         if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1523                 vid = 0;
1524
1525         return priv->info->fdb_del_cmd(ds, port, addr, vid);
1526 }
1527
1528 static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1529                             dsa_fdb_dump_cb_t *cb, void *data)
1530 {
1531         struct sja1105_private *priv = ds->priv;
1532         struct device *dev = ds->dev;
1533         int i;
1534
1535         for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1536                 struct sja1105_l2_lookup_entry l2_lookup = {0};
1537                 u8 macaddr[ETH_ALEN];
1538                 int rc;
1539
1540                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1541                                                  i, &l2_lookup);
1542                 /* No fdb entry at i, not an issue */
1543                 if (rc == -ENOENT)
1544                         continue;
1545                 if (rc) {
1546                         dev_err(dev, "Failed to dump FDB: %d\n", rc);
1547                         return rc;
1548                 }
1549
1550                 /* FDB dump callback is per port. This means we have to
1551                  * disregard a valid entry if it's not for this port, even if
1552                  * only to revisit it later. This is inefficient because the
1553                  * 1024-sized FDB table needs to be traversed 4 times through
1554                  * SPI during a 'bridge fdb show' command.
1555                  */
1556                 if (!(l2_lookup.destports & BIT(port)))
1557                         continue;
1558
1559                 /* We need to hide the FDB entry for unknown multicast */
1560                 if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
1561                     l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
1562                         continue;
1563
1564                 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1565
1566                 /* We need to hide the dsa_8021q VLANs from the user. */
1567                 if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
1568                         l2_lookup.vlanid = 0;
1569                 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
1570         }
1571         return 0;
1572 }
1573
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	/* Multicast entries share the L2 lookup table with unicast FDB
	 * entries, so reuse the FDB installation path.
	 */
	return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}
1579
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	/* Counterpart of sja1105_mdb_add: multicast entries are deleted
	 * through the unicast FDB path as well.
	 */
	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}
1585
1586 /* Common function for unicast and broadcast flood configuration.
1587  * Flooding is configured between each {ingress, egress} port pair, and since
1588  * the bridge's semantics are those of "egress flooding", it means we must
1589  * enable flooding towards this port from all ingress ports that are in the
1590  * same forwarding domain.
1591  */
1592 static int sja1105_manage_flood_domains(struct sja1105_private *priv)
1593 {
1594         struct sja1105_l2_forwarding_entry *l2_fwd;
1595         struct dsa_switch *ds = priv->ds;
1596         int from, to, rc;
1597
1598         l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
1599
1600         for (from = 0; from < ds->num_ports; from++) {
1601                 u64 fl_domain = 0, bc_domain = 0;
1602
1603                 for (to = 0; to < priv->ds->num_ports; to++) {
1604                         if (!sja1105_can_forward(l2_fwd, from, to))
1605                                 continue;
1606
1607                         if (priv->ucast_egress_floods & BIT(to))
1608                                 fl_domain |= BIT(to);
1609                         if (priv->bcast_egress_floods & BIT(to))
1610                                 bc_domain |= BIT(to);
1611                 }
1612
1613                 /* Nothing changed, nothing to do */
1614                 if (l2_fwd[from].fl_domain == fl_domain &&
1615                     l2_fwd[from].bc_domain == bc_domain)
1616                         continue;
1617
1618                 l2_fwd[from].fl_domain = fl_domain;
1619                 l2_fwd[from].bc_domain = bc_domain;
1620
1621                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1622                                                   from, &l2_fwd[from], true);
1623                 if (rc < 0)
1624                         return rc;
1625         }
1626
1627         return 0;
1628 }
1629
1630 static int sja1105_bridge_member(struct dsa_switch *ds, int port,
1631                                  struct net_device *br, bool member)
1632 {
1633         struct sja1105_l2_forwarding_entry *l2_fwd;
1634         struct sja1105_private *priv = ds->priv;
1635         int i, rc;
1636
1637         l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
1638
1639         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1640                 /* Add this port to the forwarding matrix of the
1641                  * other ports in the same bridge, and viceversa.
1642                  */
1643                 if (!dsa_is_user_port(ds, i))
1644                         continue;
1645                 /* For the ports already under the bridge, only one thing needs
1646                  * to be done, and that is to add this port to their
1647                  * reachability domain. So we can perform the SPI write for
1648                  * them immediately. However, for this port itself (the one
1649                  * that is new to the bridge), we need to add all other ports
1650                  * to its reachability domain. So we do that incrementally in
1651                  * this loop, and perform the SPI write only at the end, once
1652                  * the domain contains all other bridge ports.
1653                  */
1654                 if (i == port)
1655                         continue;
1656                 if (dsa_to_port(ds, i)->bridge_dev != br)
1657                         continue;
1658                 sja1105_port_allow_traffic(l2_fwd, i, port, member);
1659                 sja1105_port_allow_traffic(l2_fwd, port, i, member);
1660
1661                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1662                                                   i, &l2_fwd[i], true);
1663                 if (rc < 0)
1664                         return rc;
1665         }
1666
1667         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1668                                           port, &l2_fwd[port], true);
1669         if (rc)
1670                 return rc;
1671
1672         return sja1105_manage_flood_domains(priv);
1673 }
1674
1675 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
1676                                          u8 state)
1677 {
1678         struct sja1105_private *priv = ds->priv;
1679         struct sja1105_mac_config_entry *mac;
1680
1681         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1682
1683         switch (state) {
1684         case BR_STATE_DISABLED:
1685         case BR_STATE_BLOCKING:
1686                 /* From UM10944 description of DRPDTAG (why put this there?):
1687                  * "Management traffic flows to the port regardless of the state
1688                  * of the INGRESS flag". So BPDUs are still be allowed to pass.
1689                  * At the moment no difference between DISABLED and BLOCKING.
1690                  */
1691                 mac[port].ingress   = false;
1692                 mac[port].egress    = false;
1693                 mac[port].dyn_learn = false;
1694                 break;
1695         case BR_STATE_LISTENING:
1696                 mac[port].ingress   = true;
1697                 mac[port].egress    = false;
1698                 mac[port].dyn_learn = false;
1699                 break;
1700         case BR_STATE_LEARNING:
1701                 mac[port].ingress   = true;
1702                 mac[port].egress    = false;
1703                 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
1704                 break;
1705         case BR_STATE_FORWARDING:
1706                 mac[port].ingress   = true;
1707                 mac[port].egress    = true;
1708                 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
1709                 break;
1710         default:
1711                 dev_err(ds->dev, "invalid STP state: %d\n", state);
1712                 return;
1713         }
1714
1715         sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1716                                      &mac[port], true);
1717 }
1718
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *br)
{
	/* Add @port to the forwarding domain of the other ports under @br */
	return sja1105_bridge_member(ds, port, br, true);
}
1724
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
				 struct net_device *br)
{
	/* Remove @port from the forwarding domain of the ports under @br.
	 * Errors cannot be reported here (void DSA callback), so they are
	 * dropped.
	 */
	sja1105_bridge_member(ds, port, br, false);
}
1730
1731 #define BYTES_PER_KBIT (1000LL / 8)
1732
1733 static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
1734 {
1735         int i;
1736
1737         for (i = 0; i < priv->info->num_cbs_shapers; i++)
1738                 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
1739                         return i;
1740
1741         return -1;
1742 }
1743
1744 static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
1745                                      int prio)
1746 {
1747         int i;
1748
1749         for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1750                 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1751
1752                 if (cbs->port == port && cbs->prio == prio) {
1753                         memset(cbs, 0, sizeof(*cbs));
1754                         return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
1755                                                             i, cbs, true);
1756                 }
1757         }
1758
1759         return 0;
1760 }
1761
1762 static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
1763                                 struct tc_cbs_qopt_offload *offload)
1764 {
1765         struct sja1105_private *priv = ds->priv;
1766         struct sja1105_cbs_entry *cbs;
1767         int index;
1768
1769         if (!offload->enable)
1770                 return sja1105_delete_cbs_shaper(priv, port, offload->queue);
1771
1772         index = sja1105_find_unused_cbs_shaper(priv);
1773         if (index < 0)
1774                 return -ENOSPC;
1775
1776         cbs = &priv->cbs[index];
1777         cbs->port = port;
1778         cbs->prio = offload->queue;
1779         /* locredit and sendslope are negative by definition. In hardware,
1780          * positive values must be provided, and the negative sign is implicit.
1781          */
1782         cbs->credit_hi = offload->hicredit;
1783         cbs->credit_lo = abs(offload->locredit);
1784         /* User space is in kbits/sec, hardware in bytes/sec */
1785         cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
1786         cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
1787         /* Convert the negative values from 64-bit 2's complement
1788          * to 32-bit 2's complement (for the case of 0x80000000 whose
1789          * negative is still negative).
1790          */
1791         cbs->credit_lo &= GENMASK_ULL(31, 0);
1792         cbs->send_slope &= GENMASK_ULL(31, 0);
1793
1794         return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
1795                                             true);
1796 }
1797
1798 static int sja1105_reload_cbs(struct sja1105_private *priv)
1799 {
1800         int rc = 0, i;
1801
1802         for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1803                 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1804
1805                 if (!cbs->idle_slope && !cbs->send_slope)
1806                         continue;
1807
1808                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
1809                                                   true);
1810                 if (rc)
1811                         break;
1812         }
1813
1814         return rc;
1815 }
1816
/* Human-readable labels, indexed by enum sja1105_reset_reason, for the
 * dev_info() message printed by sja1105_static_config_reload().
 */
static const char * const sja1105_reset_reasons[] = {
	[SJA1105_VLAN_FILTERING] = "VLAN filtering",
	[SJA1105_RX_HWTSTAMPING] = "RX timestamping",
	[SJA1105_AGEING_TIME] = "Ageing time",
	[SJA1105_SCHEDULING] = "Time-aware scheduling",
	[SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
	[SJA1105_VIRTUAL_LINKS] = "Virtual links",
};
1825
/* For situations where we need to change a setting at runtime that is only
 * available through the static configuration, resetting the switch in order
 * to upload the new static config is unavoidable. Back up the settings we
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 *
 * Holds mgmt_lock for the whole operation and ptp_data.lock across the
 * reset itself, then compensates PTPCLKVAL for the time the reset took.
 */
int sja1105_static_config_reload(struct sja1105_private *priv,
				 enum sja1105_reset_reason reason)
{
	struct ptp_system_timestamp ptp_sts_before;
	struct ptp_system_timestamp ptp_sts_after;
	struct sja1105_mac_config_entry *mac;
	int speed_mbps[SJA1105_NUM_PORTS];
	struct dsa_switch *ds = priv->ds;
	s64 t1, t2, t3, t4;
	s64 t12, t34;
	u16 bmcr = 0;
	int rc, i;
	s64 now;

	mutex_lock(&priv->mgmt_lock);

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Back up the dynamic link speed changed by sja1105_adjust_port_config
	 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
	 * switch wants to see in the static config in order to allow us to
	 * change it through the dynamic interface later.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		speed_mbps[i] = sja1105_speed[mac[i].speed];
		mac[i].speed = SJA1105_SPEED_AUTO;
	}

	/* Snapshot the SGMII BMCR; it is re-applied to the PCS below,
	 * after the switch has been reset.
	 */
	if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
		bmcr = sja1105_sgmii_read(priv, MII_BMCR);

	/* No PTP operations can run right now */
	mutex_lock(&priv->ptp_data.lock);

	rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
	if (rc < 0)
		goto out_unlock_ptp;

	/* Reset switch and send updated static configuration */
	rc = sja1105_static_config_upload(priv);
	if (rc < 0)
		goto out_unlock_ptp;

	/* The reset zeroed PTPCLKVAL; take a second system timestamp pair
	 * so the elapsed time can be estimated and re-applied.
	 */
	rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
	if (rc < 0)
		goto out_unlock_ptp;

	t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
	t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
	t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
	t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
	/* Mid point, corresponds to pre-reset PTPCLKVAL */
	t12 = t1 + (t2 - t1) / 2;
	/* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
	t34 = t3 + (t4 - t3) / 2;
	/* Advance PTPCLKVAL by the time it took since its readout */
	now += (t34 - t12);

	__sja1105_ptp_adjtime(ds, now);

out_unlock_ptp:
	mutex_unlock(&priv->ptp_data.lock);

	dev_info(priv->ds->dev,
		 "Reset switch and programmed static config. Reason: %s\n",
		 sja1105_reset_reasons[reason]);

	/* Configure the CGU (PLLs) for MII and RMII PHYs.
	 * For these interfaces there is no dynamic configuration
	 * needed, since PLLs have same settings at all speeds.
	 */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0)
		goto out;

	/* Restore the per-port link speeds saved above */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
		if (rc < 0)
			goto out;
	}

	if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
		bool an_enabled = !!(bmcr & BMCR_ANENABLE);

		/* Re-apply the pre-reset BMCR snapshot to the PCS */
		sja1105_sgmii_pcs_config(priv, an_enabled, false);

		if (!an_enabled) {
			int speed = SPEED_UNKNOWN;

			if (bmcr & BMCR_SPEED1000)
				speed = SPEED_1000;
			else if (bmcr & BMCR_SPEED100)
				speed = SPEED_100;
			else
				speed = SPEED_10;

			sja1105_sgmii_pcs_force_speed(priv, speed);
		}
	}

	/* Re-program the credit-based shapers wiped by the reset */
	rc = sja1105_reload_cbs(priv);
	if (rc < 0)
		goto out;
out:
	mutex_unlock(&priv->mgmt_lock);

	return rc;
}
1940
1941 static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
1942 {
1943         struct sja1105_mac_config_entry *mac;
1944
1945         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1946
1947         mac[port].vlanid = pvid;
1948
1949         return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1950                                            &mac[port], true);
1951 }
1952
1953 static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
1954                                          int tree_index, int sw_index,
1955                                          int other_port, struct net_device *br)
1956 {
1957         struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
1958         struct sja1105_private *other_priv = other_ds->priv;
1959         struct sja1105_private *priv = ds->priv;
1960         int port, rc;
1961
1962         if (other_ds->ops != &sja1105_switch_ops)
1963                 return 0;
1964
1965         for (port = 0; port < ds->num_ports; port++) {
1966                 if (!dsa_is_user_port(ds, port))
1967                         continue;
1968                 if (dsa_to_port(ds, port)->bridge_dev != br)
1969                         continue;
1970
1971                 rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
1972                                                      port,
1973                                                      other_priv->dsa_8021q_ctx,
1974                                                      other_port);
1975                 if (rc)
1976                         return rc;
1977
1978                 rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
1979                                                      other_port,
1980                                                      priv->dsa_8021q_ctx,
1981                                                      port);
1982                 if (rc)
1983                         return rc;
1984         }
1985
1986         return 0;
1987 }
1988
1989 static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
1990                                            int tree_index, int sw_index,
1991                                            int other_port,
1992                                            struct net_device *br)
1993 {
1994         struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
1995         struct sja1105_private *other_priv = other_ds->priv;
1996         struct sja1105_private *priv = ds->priv;
1997         int port;
1998
1999         if (other_ds->ops != &sja1105_switch_ops)
2000                 return;
2001
2002         for (port = 0; port < ds->num_ports; port++) {
2003                 if (!dsa_is_user_port(ds, port))
2004                         continue;
2005                 if (dsa_to_port(ds, port)->bridge_dev != br)
2006                         continue;
2007
2008                 dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
2009                                                  other_priv->dsa_8021q_ctx,
2010                                                  other_port);
2011
2012                 dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
2013                                                  other_port,
2014                                                  priv->dsa_8021q_ctx, port);
2015         }
2016 }
2017
2018 static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
2019 {
2020         struct sja1105_private *priv = ds->priv;
2021         int rc;
2022
2023         rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
2024         if (rc)
2025                 return rc;
2026
2027         dev_info(ds->dev, "%s switch tagging\n",
2028                  enabled ? "Enabled" : "Disabled");
2029         return 0;
2030 }
2031
static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
			 enum dsa_tag_protocol mp)
{
	/* Always the sja1105 tagger; @port and @mp are ignored */
	return DSA_TAG_PROTO_SJA1105;
}
2038
2039 static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
2040 {
2041         int subvlan;
2042
2043         if (pvid)
2044                 return 0;
2045
2046         for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2047                 if (subvlan_map[subvlan] == VLAN_N_VID)
2048                         return subvlan;
2049
2050         return -1;
2051 }
2052
2053 static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
2054 {
2055         int subvlan;
2056
2057         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2058                 if (subvlan_map[subvlan] == vid)
2059                         return subvlan;
2060
2061         return -1;
2062 }
2063
2064 static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
2065                                           int port, u16 vid)
2066 {
2067         struct sja1105_port *sp = &priv->ports[port];
2068
2069         return sja1105_find_subvlan(sp->subvlan_map, vid);
2070 }
2071
2072 static void sja1105_init_subvlan_map(u16 *subvlan_map)
2073 {
2074         int subvlan;
2075
2076         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2077                 subvlan_map[subvlan] = VLAN_N_VID;
2078 }
2079
2080 static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
2081                                        u16 *subvlan_map)
2082 {
2083         struct sja1105_port *sp = &priv->ports[port];
2084         int subvlan;
2085
2086         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2087                 sp->subvlan_map[subvlan] = subvlan_map[subvlan];
2088 }
2089
2090 static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
2091 {
2092         struct sja1105_vlan_lookup_entry *vlan;
2093         int count, i;
2094
2095         vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
2096         count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
2097
2098         for (i = 0; i < count; i++)
2099                 if (vlan[i].vlanid == vid)
2100                         return i;
2101
2102         /* Return an invalid entry index if not found */
2103         return -1;
2104 }
2105
2106 static int
2107 sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
2108                              int count, int from_port, u16 from_vid,
2109                              u16 to_vid)
2110 {
2111         int i;
2112
2113         for (i = 0; i < count; i++)
2114                 if (retagging[i].ing_port == BIT(from_port) &&
2115                     retagging[i].vlan_ing == from_vid &&
2116                     retagging[i].vlan_egr == to_vid)
2117                         return i;
2118
2119         /* Return an invalid entry index if not found */
2120         return -1;
2121 }
2122
/* Reconcile the hardware VLAN lookup and retagging tables with the desired
 * state in @new_vlan (indexed by VID, VLAN_N_VID meaning "absent") and
 * @new_retagging, issuing dynamic config writes for each changed entry,
 * then rebuild the static config shadow tables to match.
 * Returns 0, or a negative error from the SPI writes / -ENOMEM.
 */
static int sja1105_commit_vlans(struct sja1105_private *priv,
				struct sja1105_vlan_lookup_entry *new_vlan,
				struct sja1105_retagging_entry *new_retagging,
				int num_retagging)
{
	struct sja1105_retagging_entry *retagging;
	struct sja1105_vlan_lookup_entry *vlan;
	struct sja1105_table *table;
	int num_vlans = 0;
	int rc, i, k = 0;

	/* VLAN table */
	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
	vlan = table->entries;

	for (i = 0; i < VLAN_N_VID; i++) {
		int match = sja1105_is_vlan_configured(priv, i);

		/* Count the entries that must exist afterwards */
		if (new_vlan[i].vlanid != VLAN_N_VID)
			num_vlans++;

		if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
			/* Was there before, no longer is. Delete */
			dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
			rc = sja1105_dynamic_config_write(priv,
							  BLK_IDX_VLAN_LOOKUP,
							  i, &vlan[match], false);
			if (rc < 0)
				return rc;
		} else if (new_vlan[i].vlanid != VLAN_N_VID) {
			/* Nothing changed, don't do anything */
			if (match >= 0 &&
			    vlan[match].vlanid == new_vlan[i].vlanid &&
			    vlan[match].tag_port == new_vlan[i].tag_port &&
			    vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
			    vlan[match].vmemb_port == new_vlan[i].vmemb_port)
				continue;
			/* Update entry */
			dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
			rc = sja1105_dynamic_config_write(priv,
							  BLK_IDX_VLAN_LOOKUP,
							  i, &new_vlan[i],
							  true);
			if (rc < 0)
				return rc;
		}
	}

	/* Rebuild the static config shadow of the VLAN table from scratch.
	 * NOTE(review): if kcalloc fails below, table->entry_count keeps its
	 * stale value while table->entries is NULL - presumably callers
	 * treat the whole static config as invalid on -ENOMEM; confirm.
	 */
	if (table->entry_count)
		kfree(table->entries);

	table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = num_vlans;
	vlan = table->entries;

	/* Compact the sparse, VID-indexed @new_vlan into the dense table */
	for (i = 0; i < VLAN_N_VID; i++) {
		if (new_vlan[i].vlanid == VLAN_N_VID)
			continue;
		vlan[k++] = new_vlan[i];
	}

	/* VLAN Retagging Table */
	table = &priv->static_config.tables[BLK_IDX_RETAGGING];
	retagging = table->entries;

	/* Invalidate all old retagging entries in hardware first */
	for (i = 0; i < table->entry_count; i++) {
		rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
						  i, &retagging[i], false);
		if (rc)
			return rc;
	}

	if (table->entry_count)
		kfree(table->entries);

	table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = num_retagging;
	retagging = table->entries;

	for (i = 0; i < num_retagging; i++) {
		retagging[i] = new_retagging[i];

		/* Update entry */
		rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
						  i, &retagging[i], true);
		if (rc < 0)
			return rc;
	}

	return 0;
}
2222
/* List node describing one VLAN shared across two switches: local @port,
 * @other_port on the switch owning @other_ctx, the VID and whether it
 * egresses untagged. Presumably consumed by the cross-chip sub-VLAN
 * bookkeeping below this view - confirm against the callers.
 */
struct sja1105_crosschip_vlan {
	struct list_head list;
	u16 vid;
	bool untagged;
	int port;
	int other_port;
	struct dsa_8021q_context *other_ctx;
};

/* List node identifying another switch, by its dsa_8021q context */
struct sja1105_crosschip_switch {
	struct list_head list;
	struct dsa_8021q_context *other_ctx;
};
2236
2237 static int sja1105_commit_pvid(struct sja1105_private *priv)
2238 {
2239         struct sja1105_bridge_vlan *v;
2240         struct list_head *vlan_list;
2241         int rc = 0;
2242
2243         if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2244                 vlan_list = &priv->bridge_vlans;
2245         else
2246                 vlan_list = &priv->dsa_8021q_vlans;
2247
2248         list_for_each_entry(v, vlan_list, list) {
2249                 if (v->pvid) {
2250                         rc = sja1105_pvid_apply(priv, v->port, v->vid);
2251                         if (rc)
2252                                 break;
2253                 }
2254         }
2255
2256         return rc;
2257 }
2258
2259 static int
2260 sja1105_build_bridge_vlans(struct sja1105_private *priv,
2261                            struct sja1105_vlan_lookup_entry *new_vlan)
2262 {
2263         struct sja1105_bridge_vlan *v;
2264
2265         if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
2266                 return 0;
2267
2268         list_for_each_entry(v, &priv->bridge_vlans, list) {
2269                 int match = v->vid;
2270
2271                 new_vlan[match].vlanid = v->vid;
2272                 new_vlan[match].vmemb_port |= BIT(v->port);
2273                 new_vlan[match].vlan_bc |= BIT(v->port);
2274                 if (!v->untagged)
2275                         new_vlan[match].tag_port |= BIT(v->port);
2276         }
2277
2278         return 0;
2279 }
2280
2281 static int
2282 sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
2283                               struct sja1105_vlan_lookup_entry *new_vlan)
2284 {
2285         struct sja1105_bridge_vlan *v;
2286
2287         if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2288                 return 0;
2289
2290         list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
2291                 int match = v->vid;
2292
2293                 new_vlan[match].vlanid = v->vid;
2294                 new_vlan[match].vmemb_port |= BIT(v->port);
2295                 new_vlan[match].vlan_bc |= BIT(v->port);
2296                 if (!v->untagged)
2297                         new_vlan[match].tag_port |= BIT(v->port);
2298         }
2299
2300         return 0;
2301 }
2302
2303 static int sja1105_build_subvlans(struct sja1105_private *priv,
2304                                   u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
2305                                   struct sja1105_vlan_lookup_entry *new_vlan,
2306                                   struct sja1105_retagging_entry *new_retagging,
2307                                   int *num_retagging)
2308 {
2309         struct sja1105_bridge_vlan *v;
2310         int k = *num_retagging;
2311
2312         if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2313                 return 0;
2314
2315         list_for_each_entry(v, &priv->bridge_vlans, list) {
2316                 int upstream = dsa_upstream_port(priv->ds, v->port);
2317                 int match, subvlan;
2318                 u16 rx_vid;
2319
2320                 /* Only sub-VLANs on user ports need to be applied.
2321                  * Bridge VLANs also include VLANs added automatically
2322                  * by DSA on the CPU port.
2323                  */
2324                 if (!dsa_is_user_port(priv->ds, v->port))
2325                         continue;
2326
2327                 subvlan = sja1105_find_subvlan(subvlan_map[v->port],
2328                                                v->vid);
2329                 if (subvlan < 0) {
2330                         subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
2331                                                             v->pvid);
2332                         if (subvlan < 0) {
2333                                 dev_err(priv->ds->dev, "No more free subvlans\n");
2334                                 return -ENOSPC;
2335                         }
2336                 }
2337
2338                 rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
2339
2340                 /* @v->vid on @v->port needs to be retagged to @rx_vid
2341                  * on @upstream. Assume @v->vid on @v->port and on
2342                  * @upstream was already configured by the previous
2343                  * iteration over bridge_vlans.
2344                  */
2345                 match = rx_vid;
2346                 new_vlan[match].vlanid = rx_vid;
2347                 new_vlan[match].vmemb_port |= BIT(v->port);
2348                 new_vlan[match].vmemb_port |= BIT(upstream);
2349                 new_vlan[match].vlan_bc |= BIT(v->port);
2350                 new_vlan[match].vlan_bc |= BIT(upstream);
2351                 /* The "untagged" flag is set the same as for the
2352                  * original VLAN
2353                  */
2354                 if (!v->untagged)
2355                         new_vlan[match].tag_port |= BIT(v->port);
2356                 /* But it's always tagged towards the CPU */
2357                 new_vlan[match].tag_port |= BIT(upstream);
2358
2359                 /* The Retagging Table generates packet *clones* with
2360                  * the new VLAN. This is a very odd hardware quirk
2361                  * which we need to suppress by dropping the original
2362                  * packet.
2363                  * Deny egress of the original VLAN towards the CPU
2364                  * port. This will force the switch to drop it, and
2365                  * we'll see only the retagged packets.
2366                  */
2367                 match = v->vid;
2368                 new_vlan[match].vlan_bc &= ~BIT(upstream);
2369
2370                 /* And the retagging itself */
2371                 new_retagging[k].vlan_ing = v->vid;
2372                 new_retagging[k].vlan_egr = rx_vid;
2373                 new_retagging[k].ing_port = BIT(v->port);
2374                 new_retagging[k].egr_port = BIT(upstream);
2375                 if (k++ == SJA1105_MAX_RETAGGING_COUNT) {
2376                         dev_err(priv->ds->dev, "No more retagging rules\n");
2377                         return -ENOSPC;
2378                 }
2379
2380                 subvlan_map[v->port][subvlan] = v->vid;
2381         }
2382
2383         *num_retagging = k;
2384
2385         return 0;
2386 }
2387
/* Sadly, in crosschip scenarios where the CPU port is also the link to another
 * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
 * the CPU port of neighbour switches.
 *
 * Phase 1 collects, into the temporary @crosschip_vlans list, the
 * (vid, local port, remote port, remote context) tuples that need reverse
 * retagging; phase 2 programs @new_vlan and @new_retagging accordingly.
 * Only relevant in best-effort VLAN filtering mode.
 */
static int
sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
                                 struct sja1105_vlan_lookup_entry *new_vlan,
                                 struct sja1105_retagging_entry *new_retagging,
                                 int *num_retagging)
{
        struct sja1105_crosschip_vlan *tmp, *pos;
        struct dsa_8021q_crosschip_link *c;
        struct sja1105_bridge_vlan *v, *w;
        struct list_head crosschip_vlans;
        int k = *num_retagging;
        int rc = 0;

        if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
                return 0;

        INIT_LIST_HEAD(&crosschip_vlans);

        list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
                struct sja1105_private *other_priv = c->other_ctx->ds->priv;

                /* A fully VLAN-aware neighbour has no sub-VLANs to
                 * retag back from.
                 */
                if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
                        continue;

                /* Crosschip links are also added to the CPU ports.
                 * Ignore those.
                 */
                if (!dsa_is_user_port(priv->ds, c->port))
                        continue;
                if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
                        continue;

                /* Search for VLANs on the remote port */
                list_for_each_entry(v, &other_priv->bridge_vlans, list) {
                        bool already_added = false;
                        bool we_have_it = false;

                        if (v->port != c->other_port)
                                continue;

                        /* If @v is a pvid on @other_ds, it does not need
                         * re-retagging, because its SVL field is 0 and we
                         * already allow that, via the dsa_8021q crosschip
                         * links.
                         */
                        if (v->pvid)
                                continue;

                        /* Search for the VLAN on our local port */
                        list_for_each_entry(w, &priv->bridge_vlans, list) {
                                if (w->port == c->port && w->vid == v->vid) {
                                        we_have_it = true;
                                        break;
                                }
                        }

                        if (!we_have_it)
                                continue;

                        /* Deduplicate: the same tuple may be reachable
                         * through more than one crosschip link.
                         */
                        list_for_each_entry(tmp, &crosschip_vlans, list) {
                                if (tmp->vid == v->vid &&
                                    tmp->untagged == v->untagged &&
                                    tmp->port == c->port &&
                                    tmp->other_port == v->port &&
                                    tmp->other_ctx == c->other_ctx) {
                                        already_added = true;
                                        break;
                                }
                        }

                        if (already_added)
                                continue;

                        tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
                        if (!tmp) {
                                dev_err(priv->ds->dev, "Failed to allocate memory\n");
                                rc = -ENOMEM;
                                goto out;
                        }
                        tmp->vid = v->vid;
                        tmp->port = c->port;
                        tmp->other_port = v->port;
                        tmp->other_ctx = c->other_ctx;
                        tmp->untagged = v->untagged;
                        list_add(&tmp->list, &crosschip_vlans);
                }
        }

        /* Phase 2: program a reverse retagging rule for each collected
         * tuple.
         */
        list_for_each_entry(tmp, &crosschip_vlans, list) {
                struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
                int upstream = dsa_upstream_port(priv->ds, tmp->port);
                int match, subvlan;
                u16 rx_vid;

                subvlan = sja1105_find_committed_subvlan(other_priv,
                                                         tmp->other_port,
                                                         tmp->vid);
                /* If this happens, it's a bug. The neighbour switch does not
                 * have a subvlan for tmp->vid on tmp->other_port, but it
                 * should, since we already checked for its vlan_state.
                 */
                if (WARN_ON(subvlan < 0)) {
                        rc = -EINVAL;
                        goto out;
                }

                rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
                                                  tmp->other_port,
                                                  subvlan);

                /* The @rx_vid retagged from @tmp->vid on
                 * {@tmp->other_ds, @tmp->other_port} needs to be
                 * re-retagged to @tmp->vid on the way back to us.
                 *
                 * Assume the original @tmp->vid is already configured
                 * on this local switch, otherwise we wouldn't be
                 * retagging its subvlan on the other switch in the
                 * first place. We just need to add a reverse retagging
                 * rule for @rx_vid and install @rx_vid on our ports.
                 */
                match = rx_vid;
                new_vlan[match].vlanid = rx_vid;
                new_vlan[match].vmemb_port |= BIT(tmp->port);
                new_vlan[match].vmemb_port |= BIT(upstream);
                /* The "untagged" flag is set the same as for the
                 * original VLAN. And towards the CPU, it doesn't
                 * really matter, because @rx_vid will only receive
                 * traffic on that port. For consistency with other dsa_8021q
                 * VLANs, we'll keep the CPU port tagged.
                 */
                if (!tmp->untagged)
                        new_vlan[match].tag_port |= BIT(tmp->port);
                new_vlan[match].tag_port |= BIT(upstream);
                /* Deny egress of @rx_vid towards our front-panel port.
                 * This will force the switch to drop it, and we'll see
                 * only the re-retagged packets (having the original,
                 * pre-initial-retagging, VLAN @tmp->vid).
                 */
                new_vlan[match].vlan_bc &= ~BIT(tmp->port);

                /* On reverse retagging, the same ingress VLAN goes to multiple
                 * ports. So we have an opportunity to create composite rules
                 * to not waste the limited space in the retagging table.
                 */
                k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
                                                 upstream, rx_vid, tmp->vid);
                if (k < 0) {
                        if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
                                dev_err(priv->ds->dev, "No more retagging rules\n");
                                rc = -ENOSPC;
                                goto out;
                        }
                        k = (*num_retagging)++;
                }
                /* And the retagging itself */
                new_retagging[k].vlan_ing = rx_vid;
                new_retagging[k].vlan_egr = tmp->vid;
                new_retagging[k].ing_port = BIT(upstream);
                new_retagging[k].egr_port |= BIT(tmp->port);
        }

out:
        /* The temporary collection list is freed on both success and
         * error paths.
         */
        list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
                list_del(&tmp->list);
                kfree(tmp);
        }

        return rc;
}
2561
2562 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
2563
2564 static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
2565 {
2566         struct sja1105_crosschip_switch *s, *pos;
2567         struct list_head crosschip_switches;
2568         struct dsa_8021q_crosschip_link *c;
2569         int rc = 0;
2570
2571         INIT_LIST_HEAD(&crosschip_switches);
2572
2573         list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
2574                 bool already_added = false;
2575
2576                 list_for_each_entry(s, &crosschip_switches, list) {
2577                         if (s->other_ctx == c->other_ctx) {
2578                                 already_added = true;
2579                                 break;
2580                         }
2581                 }
2582
2583                 if (already_added)
2584                         continue;
2585
2586                 s = kzalloc(sizeof(*s), GFP_KERNEL);
2587                 if (!s) {
2588                         dev_err(priv->ds->dev, "Failed to allocate memory\n");
2589                         rc = -ENOMEM;
2590                         goto out;
2591                 }
2592                 s->other_ctx = c->other_ctx;
2593                 list_add(&s->list, &crosschip_switches);
2594         }
2595
2596         list_for_each_entry(s, &crosschip_switches, list) {
2597                 struct sja1105_private *other_priv = s->other_ctx->ds->priv;
2598
2599                 rc = sja1105_build_vlan_table(other_priv, false);
2600                 if (rc)
2601                         goto out;
2602         }
2603
2604 out:
2605         list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
2606                 list_del(&s->list);
2607                 kfree(s);
2608         }
2609
2610         return rc;
2611 }
2612
2613 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
2614 {
2615         u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
2616         struct sja1105_retagging_entry *new_retagging;
2617         struct sja1105_vlan_lookup_entry *new_vlan;
2618         struct sja1105_table *table;
2619         int i, num_retagging = 0;
2620         int rc;
2621
2622         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2623         new_vlan = kcalloc(VLAN_N_VID,
2624                            table->ops->unpacked_entry_size, GFP_KERNEL);
2625         if (!new_vlan)
2626                 return -ENOMEM;
2627
2628         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2629         new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
2630                                 table->ops->unpacked_entry_size, GFP_KERNEL);
2631         if (!new_retagging) {
2632                 kfree(new_vlan);
2633                 return -ENOMEM;
2634         }
2635
2636         for (i = 0; i < VLAN_N_VID; i++)
2637                 new_vlan[i].vlanid = VLAN_N_VID;
2638
2639         for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
2640                 new_retagging[i].vlan_ing = VLAN_N_VID;
2641
2642         for (i = 0; i < priv->ds->num_ports; i++)
2643                 sja1105_init_subvlan_map(subvlan_map[i]);
2644
2645         /* Bridge VLANs */
2646         rc = sja1105_build_bridge_vlans(priv, new_vlan);
2647         if (rc)
2648                 goto out;
2649
2650         /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
2651          * - RX VLANs
2652          * - TX VLANs
2653          * - Crosschip links
2654          */
2655         rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
2656         if (rc)
2657                 goto out;
2658
2659         /* Private VLANs necessary for dsa_8021q operation, which we need to
2660          * determine on our own:
2661          * - Sub-VLANs
2662          * - Sub-VLANs of crosschip switches
2663          */
2664         rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
2665                                     &num_retagging);
2666         if (rc)
2667                 goto out;
2668
2669         rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
2670                                               &num_retagging);
2671         if (rc)
2672                 goto out;
2673
2674         rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
2675         if (rc)
2676                 goto out;
2677
2678         rc = sja1105_commit_pvid(priv);
2679         if (rc)
2680                 goto out;
2681
2682         for (i = 0; i < priv->ds->num_ports; i++)
2683                 sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
2684
2685         if (notify) {
2686                 rc = sja1105_notify_crosschip_switches(priv);
2687                 if (rc)
2688                         goto out;
2689         }
2690
2691 out:
2692         kfree(new_vlan);
2693         kfree(new_retagging);
2694
2695         return rc;
2696 }
2697
2698 /* The TPID setting belongs to the General Parameters table,
2699  * which can only be partially reconfigured at runtime (and not the TPID).
2700  * So a switch reset is required.
2701  */
2702 int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
2703                            struct netlink_ext_ack *extack)
2704 {
2705         struct sja1105_l2_lookup_params_entry *l2_lookup_params;
2706         struct sja1105_general_params_entry *general_params;
2707         struct sja1105_private *priv = ds->priv;
2708         enum sja1105_vlan_state state;
2709         struct sja1105_table *table;
2710         struct sja1105_rule *rule;
2711         bool want_tagging;
2712         u16 tpid, tpid2;
2713         int rc;
2714
2715         list_for_each_entry(rule, &priv->flow_block.rules, list) {
2716                 if (rule->type == SJA1105_RULE_VL) {
2717                         NL_SET_ERR_MSG_MOD(extack,
2718                                            "Cannot change VLAN filtering with active VL rules");
2719                         return -EBUSY;
2720                 }
2721         }
2722
2723         if (enabled) {
2724                 /* Enable VLAN filtering. */
2725                 tpid  = ETH_P_8021Q;
2726                 tpid2 = ETH_P_8021AD;
2727         } else {
2728                 /* Disable VLAN filtering. */
2729                 tpid  = ETH_P_SJA1105;
2730                 tpid2 = ETH_P_SJA1105;
2731         }
2732
2733         for (port = 0; port < ds->num_ports; port++) {
2734                 struct sja1105_port *sp = &priv->ports[port];
2735
2736                 if (enabled)
2737                         sp->xmit_tpid = priv->info->qinq_tpid;
2738                 else
2739                         sp->xmit_tpid = ETH_P_SJA1105;
2740         }
2741
2742         if (!enabled)
2743                 state = SJA1105_VLAN_UNAWARE;
2744         else if (priv->best_effort_vlan_filtering)
2745                 state = SJA1105_VLAN_BEST_EFFORT;
2746         else
2747                 state = SJA1105_VLAN_FILTERING_FULL;
2748
2749         if (priv->vlan_state == state)
2750                 return 0;
2751
2752         priv->vlan_state = state;
2753         want_tagging = (state == SJA1105_VLAN_UNAWARE ||
2754                         state == SJA1105_VLAN_BEST_EFFORT);
2755
2756         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
2757         general_params = table->entries;
2758         /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
2759         general_params->tpid = tpid;
2760         /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
2761         general_params->tpid2 = tpid2;
2762         /* When VLAN filtering is on, we need to at least be able to
2763          * decode management traffic through the "backup plan".
2764          */
2765         general_params->incl_srcpt1 = enabled;
2766         general_params->incl_srcpt0 = enabled;
2767
2768         want_tagging = priv->best_effort_vlan_filtering || !enabled;
2769
2770         /* VLAN filtering => independent VLAN learning.
2771          * No VLAN filtering (or best effort) => shared VLAN learning.
2772          *
2773          * In shared VLAN learning mode, untagged traffic still gets
2774          * pvid-tagged, and the FDB table gets populated with entries
2775          * containing the "real" (pvid or from VLAN tag) VLAN ID.
2776          * However the switch performs a masked L2 lookup in the FDB,
2777          * effectively only looking up a frame's DMAC (and not VID) for the
2778          * forwarding decision.
2779          *
2780          * This is extremely convenient for us, because in modes with
2781          * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
2782          * each front panel port. This is good for identification but breaks
2783          * learning badly - the VID of the learnt FDB entry is unique, aka
2784          * no frames coming from any other port are going to have it. So
2785          * for forwarding purposes, this is as though learning was broken
2786          * (all frames get flooded).
2787          */
2788         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
2789         l2_lookup_params = table->entries;
2790         l2_lookup_params->shared_learn = want_tagging;
2791
2792         sja1105_frame_memory_partitioning(priv);
2793
2794         rc = sja1105_build_vlan_table(priv, false);
2795         if (rc)
2796                 return rc;
2797
2798         rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
2799         if (rc)
2800                 NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");
2801
2802         /* Switch port identification based on 802.1Q is only passable
2803          * if we are not under a vlan_filtering bridge. So make sure
2804          * the two configurations are mutually exclusive (of course, the
2805          * user may know better, i.e. best_effort_vlan_filtering).
2806          */
2807         return sja1105_setup_8021q_tagging(ds, want_tagging);
2808 }
2809
2810 /* Returns number of VLANs added (0 or 1) on success,
2811  * or a negative error code.
2812  */
2813 static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
2814                                 u16 flags, struct list_head *vlan_list)
2815 {
2816         bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
2817         bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
2818         struct sja1105_bridge_vlan *v;
2819
2820         list_for_each_entry(v, vlan_list, list)
2821                 if (v->port == port && v->vid == vid &&
2822                     v->untagged == untagged && v->pvid == pvid)
2823                         /* Already added */
2824                         return 0;
2825
2826         v = kzalloc(sizeof(*v), GFP_KERNEL);
2827         if (!v) {
2828                 dev_err(ds->dev, "Out of memory while storing VLAN\n");
2829                 return -ENOMEM;
2830         }
2831
2832         v->port = port;
2833         v->vid = vid;
2834         v->untagged = untagged;
2835         v->pvid = pvid;
2836         list_add(&v->list, vlan_list);
2837
2838         return 1;
2839 }
2840
2841 /* Returns number of VLANs deleted (0 or 1) */
2842 static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
2843                                 struct list_head *vlan_list)
2844 {
2845         struct sja1105_bridge_vlan *v, *n;
2846
2847         list_for_each_entry_safe(v, n, vlan_list, list) {
2848                 if (v->port == port && v->vid == vid) {
2849                         list_del(&v->list);
2850                         kfree(v);
2851                         return 1;
2852                 }
2853         }
2854
2855         return 0;
2856 }
2857
2858 static int sja1105_vlan_add(struct dsa_switch *ds, int port,
2859                             const struct switchdev_obj_port_vlan *vlan,
2860                             struct netlink_ext_ack *extack)
2861 {
2862         struct sja1105_private *priv = ds->priv;
2863         bool vlan_table_changed = false;
2864         int rc;
2865
2866         /* If the user wants best-effort VLAN filtering (aka vlan_filtering
2867          * bridge plus tagging), be sure to at least deny alterations to the
2868          * configuration done by dsa_8021q.
2869          */
2870         if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
2871             vid_is_dsa_8021q(vlan->vid)) {
2872                 NL_SET_ERR_MSG_MOD(extack,
2873                                    "Range 1024-3071 reserved for dsa_8021q operation");
2874                 return -EBUSY;
2875         }
2876
2877         rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
2878                                   &priv->bridge_vlans);
2879         if (rc < 0)
2880                 return rc;
2881         if (rc > 0)
2882                 vlan_table_changed = true;
2883
2884         if (!vlan_table_changed)
2885                 return 0;
2886
2887         return sja1105_build_vlan_table(priv, true);
2888 }
2889
2890 static int sja1105_vlan_del(struct dsa_switch *ds, int port,
2891                             const struct switchdev_obj_port_vlan *vlan)
2892 {
2893         struct sja1105_private *priv = ds->priv;
2894         bool vlan_table_changed = false;
2895         int rc;
2896
2897         rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
2898         if (rc > 0)
2899                 vlan_table_changed = true;
2900
2901         if (!vlan_table_changed)
2902                 return 0;
2903
2904         return sja1105_build_vlan_table(priv, true);
2905 }
2906
2907 static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
2908                                       u16 flags)
2909 {
2910         struct sja1105_private *priv = ds->priv;
2911         int rc;
2912
2913         rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
2914         if (rc <= 0)
2915                 return rc;
2916
2917         return sja1105_build_vlan_table(priv, true);
2918 }
2919
2920 static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
2921 {
2922         struct sja1105_private *priv = ds->priv;
2923         int rc;
2924
2925         rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
2926         if (!rc)
2927                 return 0;
2928
2929         return sja1105_build_vlan_table(priv, true);
2930 }
2931
/* Callbacks through which tag_8021q.c installs and removes its RX/TX
 * VLANs on this switch.
 */
static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
        .vlan_add       = sja1105_dsa_8021q_vlan_add,
        .vlan_del       = sja1105_dsa_8021q_vlan_del,
};
2936
2937 /* The programming model for the SJA1105 switch is "all-at-once" via static
2938  * configuration tables. Some of these can be dynamically modified at runtime,
2939  * but not the xMII mode parameters table.
2940  * Furthermode, some PHYs may not have crystals for generating their clocks
2941  * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
2942  * ref_clk pin. So port clocking needs to be initialized early, before
2943  * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
2944  * Setting correct PHY link speed does not matter now.
2945  * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
2946  * bindings are not yet parsed by DSA core. We need to parse early so that we
2947  * can populate the xMII mode parameters table.
2948  */
2949 static int sja1105_setup(struct dsa_switch *ds)
2950 {
2951         struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
2952         struct sja1105_private *priv = ds->priv;
2953         int rc;
2954
2955         rc = sja1105_parse_dt(priv, ports);
2956         if (rc < 0) {
2957                 dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
2958                 return rc;
2959         }
2960
2961         /* Error out early if internal delays are required through DT
2962          * and we can't apply them.
2963          */
2964         rc = sja1105_parse_rgmii_delays(priv, ports);
2965         if (rc < 0) {
2966                 dev_err(ds->dev, "RGMII delay not supported\n");
2967                 return rc;
2968         }
2969
2970         rc = sja1105_ptp_clock_register(ds);
2971         if (rc < 0) {
2972                 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
2973                 return rc;
2974         }
2975         /* Create and send configuration down to device */
2976         rc = sja1105_static_config_load(priv, ports);
2977         if (rc < 0) {
2978                 dev_err(ds->dev, "Failed to load static config: %d\n", rc);
2979                 return rc;
2980         }
2981         /* Configure the CGU (PHY link modes and speeds) */
2982         rc = sja1105_clocking_setup(priv);
2983         if (rc < 0) {
2984                 dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
2985                 return rc;
2986         }
2987         /* On SJA1105, VLAN filtering per se is always enabled in hardware.
2988          * The only thing we can do to disable it is lie about what the 802.1Q
2989          * EtherType is.
2990          * So it will still try to apply VLAN filtering, but all ingress
2991          * traffic (except frames received with EtherType of ETH_P_SJA1105)
2992          * will be internally tagged with a distorted VLAN header where the
2993          * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
2994          */
2995         ds->vlan_filtering_is_global = true;
2996
2997         /* Advertise the 8 egress queues */
2998         ds->num_tx_queues = SJA1105_NUM_TC;
2999
3000         ds->mtu_enforcement_ingress = true;
3001
3002         priv->best_effort_vlan_filtering = true;
3003
3004         rc = sja1105_devlink_setup(ds);
3005         if (rc < 0)
3006                 return rc;
3007
3008         /* The DSA/switchdev model brings up switch ports in standalone mode by
3009          * default, and that means vlan_filtering is 0 since they're not under
3010          * a bridge, so it's safe to set up switch tagging at this time.
3011          */
3012         rtnl_lock();
3013         rc = sja1105_setup_8021q_tagging(ds, true);
3014         rtnl_unlock();
3015
3016         return rc;
3017 }
3018
/* Undo sja1105_setup: destroy the per-port transmit kthread workers
 * first, then tear down the subsystems, then free the software-cached
 * VLAN lists.
 */
static void sja1105_teardown(struct dsa_switch *ds)
{
        struct sja1105_private *priv = ds->priv;
        struct sja1105_bridge_vlan *v, *n;
        int port;

        /* Only user ports have xmit workers (see sja1105_port_disable,
         * which also only touches user ports).
         */
        for (port = 0; port < SJA1105_NUM_PORTS; port++) {
                struct sja1105_port *sp = &priv->ports[port];

                if (!dsa_is_user_port(ds, port))
                        continue;

                if (sp->xmit_worker)
                        kthread_destroy_worker(sp->xmit_worker);
        }

        sja1105_devlink_teardown(ds);
        sja1105_flower_teardown(ds);
        sja1105_tas_teardown(ds);
        sja1105_ptp_clock_unregister(ds);
        sja1105_static_config_free(&priv->static_config);

        /* Free both cached VLAN lists; the hardware is not touched here */
        list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
                list_del(&v->list);
                kfree(v);
        }

        list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
                list_del(&v->list);
                kfree(v);
        }
}
3051
3052 static void sja1105_port_disable(struct dsa_switch *ds, int port)
3053 {
3054         struct sja1105_private *priv = ds->priv;
3055         struct sja1105_port *sp = &priv->ports[port];
3056
3057         if (!dsa_is_user_port(ds, port))
3058                 return;
3059
3060         kthread_cancel_work_sync(&sp->xmit_work);
3061         skb_queue_purge(&sp->xmit_queue);
3062 }
3063
/* Send one link-local frame through management route @slot towards egress
 * @port. Consumes @skb in all cases. If @takets is true, the switch is asked
 * to capture a TX timestamp for the frame. Always returns NETDEV_TX_OK,
 * even on timeout, since the skb has already been handed to the hardware
 * path and must not be retransmitted by the stack.
 */
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb, bool takets)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	/* Set up a one-shot management route matching on the frame's
	 * destination MAC and steering it to the requested egress port.
	 */
	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;
	mgmt_route.tsreg = 0;
	mgmt_route.takets = takets;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			/* Retry; the do/while condition still runs, so
			 * --timeout keeps this loop bounded even when every
			 * read fails.
			 */
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 * This is only hardware supported on P/Q/R/S - on E/T it is
		 * a no-op and we are silently discarding the -EOPNOTSUPP.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}
3121
/* Recover the sja1105_port that embeds this kthread_work */
#define work_to_port(work) \
		container_of((work), struct sja1105_port, xmit_work)
/* Recover the driver private structure from the tagger data it embeds */
#define tagger_to_sja1105(t) \
		container_of((t), struct sja1105_private, tagger_data)

/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
 * lock on the bus)
 */
static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
	struct sja1105_port *sp = work_to_port(work);
	struct sja1105_tagger_data *tagger_data = sp->data;
	struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
	int port = sp - priv->ports;
	struct sk_buff *skb;

	/* Drain every frame the tagger queued for this port, one at a time
	 * through management route slot 0, serialized under mgmt_lock.
	 */
	while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
		struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;

		mutex_lock(&priv->mgmt_lock);

		sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);

		/* The clone, if there, was made by dsa_skb_tx_timestamp */
		if (clone)
			sja1105_ptp_txtstamp_skb(priv->ds, port, clone);

		mutex_unlock(&priv->mgmt_lock);
	}
}
3153
3154 /* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
3155  * which cannot be reconfigured at runtime. So a switch reset is required.
3156  */
3157 static int sja1105_set_ageing_time(struct dsa_switch *ds,
3158                                    unsigned int ageing_time)
3159 {
3160         struct sja1105_l2_lookup_params_entry *l2_lookup_params;
3161         struct sja1105_private *priv = ds->priv;
3162         struct sja1105_table *table;
3163         unsigned int maxage;
3164
3165         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
3166         l2_lookup_params = table->entries;
3167
3168         maxage = SJA1105_AGEING_TIME_MS(ageing_time);
3169
3170         if (l2_lookup_params->maxage == maxage)
3171                 return 0;
3172
3173         l2_lookup_params->maxage = maxage;
3174
3175         return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
3176 }
3177
3178 static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
3179 {
3180         struct sja1105_l2_policing_entry *policing;
3181         struct sja1105_private *priv = ds->priv;
3182
3183         new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
3184
3185         if (dsa_is_cpu_port(ds, port))
3186                 new_mtu += VLAN_HLEN;
3187
3188         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3189
3190         if (policing[port].maxlen == new_mtu)
3191                 return 0;
3192
3193         policing[port].maxlen = new_mtu;
3194
3195         return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3196 }
3197
3198 static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
3199 {
3200         return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
3201 }
3202
3203 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
3204                                  enum tc_setup_type type,
3205                                  void *type_data)
3206 {
3207         switch (type) {
3208         case TC_SETUP_QDISC_TAPRIO:
3209                 return sja1105_setup_tc_taprio(ds, port, type_data);
3210         case TC_SETUP_QDISC_CBS:
3211                 return sja1105_setup_tc_cbs(ds, port, type_data);
3212         default:
3213                 return -EOPNOTSUPP;
3214         }
3215 }
3216
/* We have a single mirror (@to) port, but can configure ingress and egress
 * mirroring on all other (@from) ports.
 * We need to allow mirroring rules only as long as the @to port is always the
 * same, and we need to unset the @to port from mirr_port only when there is no
 * mirroring rule that references it.
 */
static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
				bool ingress, bool enabled)
{
	struct sja1105_general_params_entry *general_params;
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	bool already_enabled;
	u64 new_mirr_port;
	int rc;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
	general_params = table->entries;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* mirr_port == SJA1105_NUM_PORTS (an invalid port) encodes
	 * "no mirror port configured".
	 */
	already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
	if (already_enabled && enabled && general_params->mirr_port != to) {
		dev_err(priv->ds->dev,
			"Delete mirroring rules towards port %llu first\n",
			general_params->mirr_port);
		return -EBUSY;
	}

	new_mirr_port = to;
	if (!enabled) {
		bool keep = false;
		int port;

		/* Anybody still referencing mirr_port? */
		for (port = 0; port < SJA1105_NUM_PORTS; port++) {
			if (mac[port].ing_mirr || mac[port].egr_mirr) {
				keep = true;
				break;
			}
		}
		/* Unset already_enabled for next time */
		if (!keep)
			new_mirr_port = SJA1105_NUM_PORTS;
	}
	/* Only touch the hardware when the mirror port actually changes */
	if (new_mirr_port != general_params->mirr_port) {
		general_params->mirr_port = new_mirr_port;

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
						  0, general_params, true);
		if (rc < 0)
			return rc;
	}

	/* Flip the requested direction's mirroring bit on the @from port */
	if (ingress)
		mac[from].ing_mirr = enabled;
	else
		mac[from].egr_mirr = enabled;

	return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
					    &mac[from], true);
}
3279
3280 static int sja1105_mirror_add(struct dsa_switch *ds, int port,
3281                               struct dsa_mall_mirror_tc_entry *mirror,
3282                               bool ingress)
3283 {
3284         return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3285                                     ingress, true);
3286 }
3287
3288 static void sja1105_mirror_del(struct dsa_switch *ds, int port,
3289                                struct dsa_mall_mirror_tc_entry *mirror)
3290 {
3291         sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3292                              mirror->ingress, false);
3293 }
3294
3295 static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
3296                                     struct dsa_mall_policer_tc_entry *policer)
3297 {
3298         struct sja1105_l2_policing_entry *policing;
3299         struct sja1105_private *priv = ds->priv;
3300
3301         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3302
3303         /* In hardware, every 8 microseconds the credit level is incremented by
3304          * the value of RATE bytes divided by 64, up to a maximum of SMAX
3305          * bytes.
3306          */
3307         policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
3308                                       1000000);
3309         policing[port].smax = policer->burst;
3310
3311         return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3312 }
3313
3314 static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
3315 {
3316         struct sja1105_l2_policing_entry *policing;
3317         struct sja1105_private *priv = ds->priv;
3318
3319         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3320
3321         policing[port].rate = SJA1105_RATE_MBPS(1000);
3322         policing[port].smax = 65535;
3323
3324         sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3325 }
3326
3327 static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
3328                                      bool enabled)
3329 {
3330         struct sja1105_mac_config_entry *mac;
3331         int rc;
3332
3333         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
3334
3335         mac[port].dyn_learn = enabled;
3336
3337         rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
3338                                           &mac[port], true);
3339         if (rc)
3340                 return rc;
3341
3342         if (enabled)
3343                 priv->learn_ena |= BIT(port);
3344         else
3345                 priv->learn_ena &= ~BIT(port);
3346
3347         return 0;
3348 }
3349
3350 static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
3351                                           struct switchdev_brport_flags flags)
3352 {
3353         if (flags.mask & BR_FLOOD) {
3354                 if (flags.val & BR_FLOOD)
3355                         priv->ucast_egress_floods |= BIT(to);
3356                 else
3357                         priv->ucast_egress_floods &= ~BIT(to);
3358         }
3359
3360         if (flags.mask & BR_BCAST_FLOOD) {
3361                 if (flags.val & BR_BCAST_FLOOD)
3362                         priv->bcast_egress_floods |= BIT(to);
3363                 else
3364                         priv->bcast_egress_floods &= ~BIT(to);
3365         }
3366
3367         return sja1105_manage_flood_domains(priv);
3368 }
3369
3370 static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
3371                                     struct switchdev_brport_flags flags,
3372                                     struct netlink_ext_ack *extack)
3373 {
3374         struct sja1105_l2_lookup_entry *l2_lookup;
3375         struct sja1105_table *table;
3376         int match;
3377
3378         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
3379         l2_lookup = table->entries;
3380
3381         for (match = 0; match < table->entry_count; match++)
3382                 if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
3383                     l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
3384                         break;
3385
3386         if (match == table->entry_count) {
3387                 NL_SET_ERR_MSG_MOD(extack,
3388                                    "Could not find FDB entry for unknown multicast");
3389                 return -ENOSPC;
3390         }
3391
3392         if (flags.val & BR_MCAST_FLOOD)
3393                 l2_lookup[match].destports |= BIT(to);
3394         else
3395                 l2_lookup[match].destports &= ~BIT(to);
3396
3397         return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
3398                                             l2_lookup[match].index,
3399                                             &l2_lookup[match],
3400                                             true);
3401 }
3402
3403 static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
3404                                          struct switchdev_brport_flags flags,
3405                                          struct netlink_ext_ack *extack)
3406 {
3407         struct sja1105_private *priv = ds->priv;
3408
3409         if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
3410                            BR_BCAST_FLOOD))
3411                 return -EINVAL;
3412
3413         if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
3414             !priv->info->can_limit_mcast_flood) {
3415                 bool multicast = !!(flags.val & BR_MCAST_FLOOD);
3416                 bool unicast = !!(flags.val & BR_FLOOD);
3417
3418                 if (unicast != multicast) {
3419                         NL_SET_ERR_MSG_MOD(extack,
3420                                            "This chip cannot configure multicast flooding independently of unicast");
3421                         return -EINVAL;
3422                 }
3423         }
3424
3425         return 0;
3426 }
3427
3428 static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
3429                                      struct switchdev_brport_flags flags,
3430                                      struct netlink_ext_ack *extack)
3431 {
3432         struct sja1105_private *priv = ds->priv;
3433         int rc;
3434
3435         if (flags.mask & BR_LEARNING) {
3436                 bool learn_ena = !!(flags.val & BR_LEARNING);
3437
3438                 rc = sja1105_port_set_learning(priv, port, learn_ena);
3439                 if (rc)
3440                         return rc;
3441         }
3442
3443         if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
3444                 rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
3445                 if (rc)
3446                         return rc;
3447         }
3448
3449         /* For chips that can't offload BR_MCAST_FLOOD independently, there
3450          * is nothing to do here, we ensured the configuration is in sync by
3451          * offloading BR_FLOOD.
3452          */
3453         if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
3454                 rc = sja1105_port_mcast_flood(priv, port, flags,
3455                                               extack);
3456                 if (rc)
3457                         return rc;
3458         }
3459
3460         return 0;
3461 }
3462
/* DSA driver operations implemented by this switch driver */
static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol	= sja1105_get_tag_protocol,
	.setup			= sja1105_setup,
	.teardown		= sja1105_teardown,
	.set_ageing_time	= sja1105_set_ageing_time,
	.port_change_mtu	= sja1105_change_mtu,
	.port_max_mtu		= sja1105_get_max_mtu,
	.phylink_validate	= sja1105_phylink_validate,
	.phylink_mac_link_state	= sja1105_mac_pcs_get_state,
	.phylink_mac_config	= sja1105_mac_config,
	.phylink_mac_link_up	= sja1105_mac_link_up,
	.phylink_mac_link_down	= sja1105_mac_link_down,
	.get_strings		= sja1105_get_strings,
	.get_ethtool_stats	= sja1105_get_ethtool_stats,
	.get_sset_count		= sja1105_get_sset_count,
	.get_ts_info		= sja1105_get_ts_info,
	.port_disable		= sja1105_port_disable,
	.port_fdb_dump		= sja1105_fdb_dump,
	.port_fdb_add		= sja1105_fdb_add,
	.port_fdb_del		= sja1105_fdb_del,
	.port_bridge_join	= sja1105_bridge_join,
	.port_bridge_leave	= sja1105_bridge_leave,
	.port_pre_bridge_flags	= sja1105_port_pre_bridge_flags,
	.port_bridge_flags	= sja1105_port_bridge_flags,
	.port_stp_state_set	= sja1105_bridge_stp_state_set,
	.port_vlan_filtering	= sja1105_vlan_filtering,
	.port_vlan_add		= sja1105_vlan_add,
	.port_vlan_del		= sja1105_vlan_del,
	.port_mdb_add		= sja1105_mdb_add,
	.port_mdb_del		= sja1105_mdb_del,
	.port_hwtstamp_get	= sja1105_hwtstamp_get,
	.port_hwtstamp_set	= sja1105_hwtstamp_set,
	.port_rxtstamp		= sja1105_port_rxtstamp,
	.port_txtstamp		= sja1105_port_txtstamp,
	.port_setup_tc		= sja1105_port_setup_tc,
	.port_mirror_add	= sja1105_mirror_add,
	.port_mirror_del	= sja1105_mirror_del,
	.port_policer_add	= sja1105_port_policer_add,
	.port_policer_del	= sja1105_port_policer_del,
	.cls_flower_add		= sja1105_cls_flower_add,
	.cls_flower_del		= sja1105_cls_flower_del,
	.cls_flower_stats	= sja1105_cls_flower_stats,
	.crosschip_bridge_join	= sja1105_crosschip_bridge_join,
	.crosschip_bridge_leave	= sja1105_crosschip_bridge_leave,
	.devlink_param_get	= sja1105_devlink_param_get,
	.devlink_param_set	= sja1105_devlink_param_set,
	.devlink_info_get	= sja1105_devlink_info_get,
};
3511
3512 static const struct of_device_id sja1105_dt_ids[];
3513
/* Read the chip's device ID and part number over SPI and cross-check them
 * against the device tree compatible that was probed. On a mismatch with a
 * chip we do know, warn and silently adopt the detected chip's info.
 * Returns 0 on success, -ENODEV for an unrecognized chip, or a negative
 * SPI transfer error.
 */
static int sja1105_check_device_id(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
	struct device *dev = &priv->spidev->dev;
	const struct of_device_id *match;
	u32 device_id;
	u64 part_no;
	int rc;

	rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
			      NULL);
	if (rc < 0)
		return rc;

	rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
			      SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	/* Extract the part number field from the product ID register */
	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);

	for (match = sja1105_dt_ids; match->compatible[0]; match++) {
		const struct sja1105_info *info = match->data;

		/* Is what's been probed in our match table at all? */
		if (info->device_id != device_id || info->part_no != part_no)
			continue;

		/* But is it what's in the device tree? */
		if (priv->info->device_id != device_id ||
		    priv->info->part_no != part_no) {
			dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
				 priv->info->name, info->name);
			/* It isn't. No problem, pick that up. */
			priv->info = info;
		}

		return 0;
	}

	dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
		device_id, part_no);

	return -ENODEV;
}
3560
3561 static int sja1105_probe(struct spi_device *spi)
3562 {
3563         struct sja1105_tagger_data *tagger_data;
3564         struct device *dev = &spi->dev;
3565         struct sja1105_private *priv;
3566         struct dsa_switch *ds;
3567         int rc, port;
3568
3569         if (!dev->of_node) {
3570                 dev_err(dev, "No DTS bindings for SJA1105 driver\n");
3571                 return -EINVAL;
3572         }
3573
3574         priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
3575         if (!priv)
3576                 return -ENOMEM;
3577
3578         /* Configure the optional reset pin and bring up switch */
3579         priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
3580         if (IS_ERR(priv->reset_gpio))
3581                 dev_dbg(dev, "reset-gpios not defined, ignoring\n");
3582         else
3583                 sja1105_hw_reset(priv->reset_gpio, 1, 1);
3584
3585         /* Populate our driver private structure (priv) based on
3586          * the device tree node that was probed (spi)
3587          */
3588         priv->spidev = spi;
3589         spi_set_drvdata(spi, priv);
3590
3591         /* Configure the SPI bus */
3592         spi->bits_per_word = 8;
3593         rc = spi_setup(spi);
3594         if (rc < 0) {
3595                 dev_err(dev, "Could not init SPI\n");
3596                 return rc;
3597         }
3598
3599         priv->info = of_device_get_match_data(dev);
3600
3601         /* Detect hardware device */
3602         rc = sja1105_check_device_id(priv);
3603         if (rc < 0) {
3604                 dev_err(dev, "Device ID check failed: %d\n", rc);
3605                 return rc;
3606         }
3607
3608         dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
3609
3610         ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
3611         if (!ds)
3612                 return -ENOMEM;
3613
3614         ds->dev = dev;
3615         ds->num_ports = SJA1105_NUM_PORTS;
3616         ds->ops = &sja1105_switch_ops;
3617         ds->priv = priv;
3618         priv->ds = ds;
3619
3620         tagger_data = &priv->tagger_data;
3621
3622         mutex_init(&priv->ptp_data.lock);
3623         mutex_init(&priv->mgmt_lock);
3624
3625         priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
3626                                            GFP_KERNEL);
3627         if (!priv->dsa_8021q_ctx)
3628                 return -ENOMEM;
3629
3630         priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
3631         priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
3632         priv->dsa_8021q_ctx->ds = ds;
3633
3634         INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
3635         INIT_LIST_HEAD(&priv->bridge_vlans);
3636         INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
3637
3638         sja1105_tas_setup(ds);
3639         sja1105_flower_setup(ds);
3640
3641         rc = dsa_register_switch(priv->ds);
3642         if (rc)
3643                 return rc;
3644
3645         if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
3646                 priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
3647                                          sizeof(struct sja1105_cbs_entry),
3648                                          GFP_KERNEL);
3649                 if (!priv->cbs)
3650                         return -ENOMEM;
3651         }
3652
3653         /* Connections between dsa_port and sja1105_port */
3654         for (port = 0; port < SJA1105_NUM_PORTS; port++) {
3655                 struct sja1105_port *sp = &priv->ports[port];
3656                 struct dsa_port *dp = dsa_to_port(ds, port);
3657                 struct net_device *slave;
3658                 int subvlan;
3659
3660                 if (!dsa_is_user_port(ds, port))
3661                         continue;
3662
3663                 dp->priv = sp;
3664                 sp->dp = dp;
3665                 sp->data = tagger_data;
3666                 slave = dp->slave;
3667                 kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
3668                 sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
3669                                                         slave->name);
3670                 if (IS_ERR(sp->xmit_worker)) {
3671                         rc = PTR_ERR(sp->xmit_worker);
3672                         dev_err(ds->dev,
3673                                 "failed to create deferred xmit thread: %d\n",
3674                                 rc);
3675                         goto out;
3676                 }
3677                 skb_queue_head_init(&sp->xmit_queue);
3678                 sp->xmit_tpid = ETH_P_SJA1105;
3679
3680                 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
3681                         sp->subvlan_map[subvlan] = VLAN_N_VID;
3682         }
3683
3684         return 0;
3685 out:
3686         while (port-- > 0) {
3687                 struct sja1105_port *sp = &priv->ports[port];
3688
3689                 if (!dsa_is_user_port(ds, port))
3690                         continue;
3691
3692                 kthread_destroy_worker(sp->xmit_worker);
3693         }
3694         return rc;
3695 }
3696
/* SPI remove: unregister the DSA switch, which triggers sja1105_teardown()
 * through the DSA core. All memory is devm-managed, so nothing to free.
 */
static int sja1105_remove(struct spi_device *spi)
{
	struct sja1105_private *priv = spi_get_drvdata(spi);

	dsa_unregister_switch(priv->ds);
	return 0;
}
3704
/* Device tree match table; also scanned by sja1105_check_device_id() to
 * map a detected {device ID, part number} back to its chip info.
 */
static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
3715
/* SPI driver glue; registered via module_spi_driver() below */
static struct spi_driver sja1105_driver = {
	.driver = {
		.name  = "sja1105",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(sja1105_dt_ids),
	},
	.probe  = sja1105_probe,
	.remove = sja1105_remove,
};

module_spi_driver(sja1105_driver);

MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
MODULE_DESCRIPTION("SJA1105 Driver");
MODULE_LICENSE("GPL v2");