net: dsa: sja1105: migrate to xpcs for SGMII
[linux-2.6-microblaze.git] drivers/net/dsa/sja1105/sja1105_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/delay.h>
9 #include <linux/module.h>
10 #include <linux/printk.h>
11 #include <linux/spi/spi.h>
12 #include <linux/errno.h>
13 #include <linux/gpio/consumer.h>
14 #include <linux/phylink.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <linux/of_mdio.h>
18 #include <linux/of_device.h>
19 #include <linux/pcs/pcs-xpcs.h>
20 #include <linux/netdev_features.h>
21 #include <linux/netdevice.h>
22 #include <linux/if_bridge.h>
23 #include <linux/if_ether.h>
24 #include <linux/dsa/8021q.h>
25 #include "sja1105.h"
26 #include "sja1105_tas.h"
27
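/* Note: 0x010000000000 is 01:00:00:00:00:00 in u64 form; used as both value
 * and mask in the static FDB below, it matches exactly the I/G (multicast)
 * bit of the DMAC, i.e. any multicast address. The default VLAN is
 * VLAN_N_VID - 1 = 4095, a VID reserved by 802.1Q, which is why neither the
 * bridge nor the 8021q module can ever create it.
 */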
28 #define SJA1105_UNKNOWN_MULTICAST       0x010000000000ull
29 #define SJA1105_DEFAULT_VLAN            (VLAN_N_VID - 1)
30
31 static const struct dsa_switch_ops sja1105_switch_ops;
32
33 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
34                              unsigned int startup_delay)
35 {
36         gpiod_set_value_cansleep(gpio, 1);
37         /* Wait for minimum reset pulse length */
38         msleep(pulse_len);
39         gpiod_set_value_cansleep(gpio, 0);
40         /* Wait until chip is ready after reset */
41         msleep(startup_delay);
42 }
43
44 static void
45 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
46                            int from, int to, bool allow)
47 {
48         if (allow)
49                 l2_fwd[from].reach_port |= BIT(to);
50         else
51                 l2_fwd[from].reach_port &= ~BIT(to);
52 }
53
54 static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
55                                 int from, int to)
56 {
57         return !!(l2_fwd[from].reach_port & BIT(to));
58 }
59
60 static int sja1105_init_mac_settings(struct sja1105_private *priv)
61 {
62         struct sja1105_mac_config_entry default_mac = {
63                 /* Enable all 8 priority queues on egress.
64                  * Every queue i holds top[i] - base[i] frames.
65                  * Sum of top[i] - base[i] is 511 (max hardware limit).
66                  */
67                 .top  = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
68                 .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
69                 .enabled = {true, true, true, true, true, true, true, true},
70                 /* Keep standard IFG of 12 bytes on egress. */
71                 .ifg = 0,
72                 /* Always put the MAC speed in automatic mode, where it can be
73                  * adjusted at runtime by PHYLINK.
74                  */
75                 .speed = priv->info->port_speed[SJA1105_SPEED_AUTO],
76                 /* No static correction for 1-step 1588 events */
77                 .tp_delin = 0,
78                 .tp_delout = 0,
79                 /* Disable aging for critical TTEthernet traffic */
80                 .maxage = 0xFF,
81                 /* Internal VLAN (pvid) to apply to untagged ingress */
82                 .vlanprio = 0,
83                 .vlanid = 1,
84                 .ing_mirr = false,
85                 .egr_mirr = false,
86                 /* Don't drop traffic with an EtherType other than ETH_P_IP */
87                 .drpnona664 = false,
88                 /* Don't drop double-tagged traffic */
89                 .drpdtag = false,
90                 /* Don't drop untagged traffic */
91                 .drpuntag = false,
92                 /* Don't retag 802.1p (VID 0) traffic with the pvid */
93                 .retag = false,
94                 /* Disable learning and I/O on user ports by default -
95                  * STP will enable it.
96                  */
97                 .dyn_learn = false,
98                 .egress = false,
99                 .ingress = false,
100         };
101         struct sja1105_mac_config_entry *mac;
102         struct dsa_switch *ds = priv->ds;
103         struct sja1105_table *table;
104         int i;
105
106         table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
107
108         /* Discard previous MAC Configuration Table */
109         if (table->entry_count) {
110                 kfree(table->entries);
111                 table->entry_count = 0;
112         }
113
114         table->entries = kcalloc(table->ops->max_entry_count,
115                                  table->ops->unpacked_entry_size, GFP_KERNEL);
116         if (!table->entries)
117                 return -ENOMEM;
118
119         table->entry_count = table->ops->max_entry_count;
120
121         mac = table->entries;
122
123         for (i = 0; i < ds->num_ports; i++) {
124                 mac[i] = default_mac;
125                 if (i == dsa_upstream_port(priv->ds, i)) {
126                         /* STP doesn't get called for CPU port, so we need to
127                          * set the I/O parameters statically.
128                          */
129                         mac[i].dyn_learn = true;
130                         mac[i].ingress = true;
131                         mac[i].egress = true;
132                 }
133         }
134
135         return 0;
136 }
137
138 static int sja1105_init_mii_settings(struct sja1105_private *priv)
139 {
140         struct device *dev = &priv->spidev->dev;
141         struct sja1105_xmii_params_entry *mii;
142         struct dsa_switch *ds = priv->ds;
143         struct sja1105_table *table;
144         int i;
145
146         table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
147
148         /* Discard previous xMII Mode Parameters Table */
149         if (table->entry_count) {
150                 kfree(table->entries);
151                 table->entry_count = 0;
152         }
153
154         table->entries = kcalloc(table->ops->max_entry_count,
155                                  table->ops->unpacked_entry_size, GFP_KERNEL);
156         if (!table->entries)
157                 return -ENOMEM;
158
159         /* Override table based on PHYLINK DT bindings */
160         table->entry_count = table->ops->max_entry_count;
161
162         mii = table->entries;
163
164         for (i = 0; i < ds->num_ports; i++) {
165                 sja1105_mii_role_t role = XMII_MAC;
166
167                 if (dsa_is_unused_port(priv->ds, i))
168                         continue;
169
170                 switch (priv->phy_mode[i]) {
171                 case PHY_INTERFACE_MODE_INTERNAL:
172                         if (priv->info->internal_phy[i] == SJA1105_NO_PHY)
173                                 goto unsupported;
174
175                         mii->xmii_mode[i] = XMII_MODE_MII;
176                         if (priv->info->internal_phy[i] == SJA1105_PHY_BASE_TX)
177                                 mii->special[i] = true;
178
179                         break;
180                 case PHY_INTERFACE_MODE_REVMII:
181                         role = XMII_PHY;
182                         fallthrough;
183                 case PHY_INTERFACE_MODE_MII:
184                         if (!priv->info->supports_mii[i])
185                                 goto unsupported;
186
187                         mii->xmii_mode[i] = XMII_MODE_MII;
188                         break;
189                 case PHY_INTERFACE_MODE_REVRMII:
190                         role = XMII_PHY;
191                         fallthrough;
192                 case PHY_INTERFACE_MODE_RMII:
193                         if (!priv->info->supports_rmii[i])
194                                 goto unsupported;
195
196                         mii->xmii_mode[i] = XMII_MODE_RMII;
197                         break;
198                 case PHY_INTERFACE_MODE_RGMII:
199                 case PHY_INTERFACE_MODE_RGMII_ID:
200                 case PHY_INTERFACE_MODE_RGMII_RXID:
201                 case PHY_INTERFACE_MODE_RGMII_TXID:
202                         if (!priv->info->supports_rgmii[i])
203                                 goto unsupported;
204
205                         mii->xmii_mode[i] = XMII_MODE_RGMII;
206                         break;
207                 case PHY_INTERFACE_MODE_SGMII:
208                         if (!priv->info->supports_sgmii[i])
209                                 goto unsupported;
210
211                         mii->xmii_mode[i] = XMII_MODE_SGMII;
212                         break;
213                 case PHY_INTERFACE_MODE_2500BASEX:
214                         if (!priv->info->supports_2500basex[i])
215                                 goto unsupported;
216
217                         mii->xmii_mode[i] = XMII_MODE_SGMII;
218                         break;
219 unsupported:
220                 default:
221                         dev_err(dev, "Unsupported PHY mode %s on port %d!\n",
222                                 phy_modes(priv->phy_mode[i]), i);
223                         return -EINVAL;
224                 }
225
226                 mii->phy_mac[i] = role;
227         }
228         return 0;
229 }
230
231 static int sja1105_init_static_fdb(struct sja1105_private *priv)
232 {
233         struct sja1105_l2_lookup_entry *l2_lookup;
234         struct sja1105_table *table;
235         int port;
236
237         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
238
239         /* We only populate the FDB table through dynamic L2 Address Lookup
240          * entries, except for a special entry at the end which is a catch-all
241          * for unknown multicast and will be used to control the flooding domain.
242          */
243         if (table->entry_count) {
244                 kfree(table->entries);
245                 table->entry_count = 0;
246         }
247
248         if (!priv->info->can_limit_mcast_flood)
249                 return 0;
250
251         table->entries = kcalloc(1, table->ops->unpacked_entry_size,
252                                  GFP_KERNEL);
253         if (!table->entries)
254                 return -ENOMEM;
255
256         table->entry_count = 1;
257         l2_lookup = table->entries;
258
259         /* All L2 multicast addresses have an odd first octet */
260         l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
261         l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
262         l2_lookup[0].lockeds = true;
263         l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;
264
265         /* Flood multicast to every port by default */
266         for (port = 0; port < priv->ds->num_ports; port++)
267                 if (!dsa_is_unused_port(priv->ds, port))
268                         l2_lookup[0].destports |= BIT(port);
269
270         return 0;
271 }
272
273 static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
274 {
275         struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
276                 /* Learned FDB entries are forgotten after 300 seconds */
277                 .maxage = SJA1105_AGEING_TIME_MS(300000),
278                 /* All entries within a FDB bin are available for learning */
279                 .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
280                 /* And the P/Q/R/S equivalent setting: */
281                 .start_dynspc = 0,
282                 /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
283                 .poly = 0x97,
284                 /* This selects between Independent VLAN Learning (IVL) and
285                  * Shared VLAN Learning (SVL)
286                  */
287                 .shared_learn = true,
288                 /* Don't discard management traffic based on ENFPORT -
289                  * we don't perform SMAC port enforcement anyway, so
290                  * what we are setting here doesn't matter.
291                  */
292                 .no_enf_hostprt = false,
293                 /* Don't learn SMAC for mac_fltres1 and mac_fltres0.
294                  * Maybe correlate with no_linklocal_learn from bridge driver?
295                  */
296                 .no_mgmt_learn = true,
297                 /* P/Q/R/S only */
298                 .use_static = true,
299                 /* Dynamically learned FDB entries can overwrite other (older)
300                  * dynamic FDB entries
301                  */
302                 .owr_dyn = true,
303                 .drpnolearn = true,
304         };
305         struct dsa_switch *ds = priv->ds;
306         int port, num_used_ports = 0;
307         struct sja1105_table *table;
308         u64 max_fdb_entries;
309
310         for (port = 0; port < ds->num_ports; port++)
311                 if (!dsa_is_unused_port(ds, port))
312                         num_used_ports++;
313
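        /* For example, on a fully populated 5-port SJA1105 this gives each
         * port 1024 / 5 = 204 of the 1024 L2 Lookup (FDB) entries.
         */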
314         max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / num_used_ports;
315
316         for (port = 0; port < ds->num_ports; port++) {
317                 if (dsa_is_unused_port(ds, port))
318                         continue;
319
320                 default_l2_lookup_params.maxaddrp[port] = max_fdb_entries;
321         }
322
323         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
324
325         if (table->entry_count) {
326                 kfree(table->entries);
327                 table->entry_count = 0;
328         }
329
330         table->entries = kcalloc(table->ops->max_entry_count,
331                                  table->ops->unpacked_entry_size, GFP_KERNEL);
332         if (!table->entries)
333                 return -ENOMEM;
334
335         table->entry_count = table->ops->max_entry_count;
336
337         /* This table only has a single entry */
338         ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
339                                 default_l2_lookup_params;
340
341         return 0;
342 }
343
344 /* Set up a default VLAN for untagged traffic injected from the CPU
345  * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
346  * All DT-defined ports are members of this VLAN, and there are no
347  * restrictions on forwarding (since the CPU selects the destination).
348  * Frames from this VLAN will always be transmitted as untagged, and
349  * neither the bridge nor the 8021q module can create this VLAN ID.
350  */
351 static int sja1105_init_static_vlan(struct sja1105_private *priv)
352 {
353         struct sja1105_table *table;
354         struct sja1105_vlan_lookup_entry pvid = {
355                 .type_entry = SJA1110_VLAN_D_TAG,
356                 .ving_mirr = 0,
357                 .vegr_mirr = 0,
358                 .vmemb_port = 0,
359                 .vlan_bc = 0,
360                 .tag_port = 0,
361                 .vlanid = SJA1105_DEFAULT_VLAN,
362         };
363         struct dsa_switch *ds = priv->ds;
364         int port;
365
366         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
367
368         if (table->entry_count) {
369                 kfree(table->entries);
370                 table->entry_count = 0;
371         }
372
373         table->entries = kzalloc(table->ops->unpacked_entry_size,
374                                  GFP_KERNEL);
375         if (!table->entries)
376                 return -ENOMEM;
377
378         table->entry_count = 1;
379
380         for (port = 0; port < ds->num_ports; port++) {
381                 struct sja1105_bridge_vlan *v;
382
383                 if (dsa_is_unused_port(ds, port))
384                         continue;
385
386                 pvid.vmemb_port |= BIT(port);
387                 pvid.vlan_bc |= BIT(port);
388                 pvid.tag_port &= ~BIT(port);
389
390                 v = kzalloc(sizeof(*v), GFP_KERNEL);
391                 if (!v)
392                         return -ENOMEM;
393
394                 v->port = port;
395                 v->vid = SJA1105_DEFAULT_VLAN;
396                 v->untagged = true;
397                 if (dsa_is_cpu_port(ds, port))
398                         v->pvid = true;
399                 list_add(&v->list, &priv->dsa_8021q_vlans);
400         }
401
402         ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
403         return 0;
404 }
405
406 static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
407 {
408         struct sja1105_l2_forwarding_entry *l2fwd;
409         struct dsa_switch *ds = priv->ds;
410         struct sja1105_table *table;
411         int i, j;
412
413         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
414
415         if (table->entry_count) {
416                 kfree(table->entries);
417                 table->entry_count = 0;
418         }
419
420         table->entries = kcalloc(table->ops->max_entry_count,
421                                  table->ops->unpacked_entry_size, GFP_KERNEL);
422         if (!table->entries)
423                 return -ENOMEM;
424
425         table->entry_count = table->ops->max_entry_count;
426
427         l2fwd = table->entries;
428
429         /* The first ds->num_ports entries define the forwarding rules */
430         for (i = 0; i < ds->num_ports; i++) {
431                 unsigned int upstream = dsa_upstream_port(priv->ds, i);
432
433                 if (dsa_is_unused_port(ds, i))
434                         continue;
435
436                 for (j = 0; j < SJA1105_NUM_TC; j++)
437                         l2fwd[i].vlan_pmap[j] = j;
438
439                 /* All ports start up with egress flooding enabled,
440                  * including the CPU port.
441                  */
442                 priv->ucast_egress_floods |= BIT(i);
443                 priv->bcast_egress_floods |= BIT(i);
444
445                 if (i == upstream)
446                         continue;
447
448                 sja1105_port_allow_traffic(l2fwd, i, upstream, true);
449                 sja1105_port_allow_traffic(l2fwd, upstream, i, true);
450
451                 l2fwd[i].bc_domain = BIT(upstream);
452                 l2fwd[i].fl_domain = BIT(upstream);
453
454                 l2fwd[upstream].bc_domain |= BIT(i);
455                 l2fwd[upstream].fl_domain |= BIT(i);
456         }
457
458         /* Next 8 entries define VLAN PCP mapping from ingress to egress.
459          * Create a one-to-one mapping.
460          */
461         for (i = 0; i < SJA1105_NUM_TC; i++) {
462                 for (j = 0; j < ds->num_ports; j++) {
463                         if (dsa_is_unused_port(ds, j))
464                                 continue;
465
466                         l2fwd[ds->num_ports + i].vlan_pmap[j] = i;
467                 }
468
469                 l2fwd[ds->num_ports + i].type_egrpcp2outputq = true;
470         }
471
472         return 0;
473 }
474
475 static int sja1110_init_pcp_remapping(struct sja1105_private *priv)
476 {
477         struct sja1110_pcp_remapping_entry *pcp_remap;
478         struct dsa_switch *ds = priv->ds;
479         struct sja1105_table *table;
480         int port, tc;
481
482         table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING];
483
484         /* Nothing to do for SJA1105 */
485         if (!table->ops->max_entry_count)
486                 return 0;
487
488         if (table->entry_count) {
489                 kfree(table->entries);
490                 table->entry_count = 0;
491         }
492
493         table->entries = kcalloc(table->ops->max_entry_count,
494                                  table->ops->unpacked_entry_size, GFP_KERNEL);
495         if (!table->entries)
496                 return -ENOMEM;
497
498         table->entry_count = table->ops->max_entry_count;
499
500         pcp_remap = table->entries;
501
502         /* Repeat the configuration done for vlan_pmap */
503         for (port = 0; port < ds->num_ports; port++) {
504                 if (dsa_is_unused_port(ds, port))
505                         continue;
506
507                 for (tc = 0; tc < SJA1105_NUM_TC; tc++)
508                         pcp_remap[port].egrpcp[tc] = tc;
509         }
510
511         return 0;
512 }
513
514 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
515 {
516         struct sja1105_l2_forwarding_params_entry *l2fwd_params;
517         struct sja1105_table *table;
518
519         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
520
521         if (table->entry_count) {
522                 kfree(table->entries);
523                 table->entry_count = 0;
524         }
525
526         table->entries = kcalloc(table->ops->max_entry_count,
527                                  table->ops->unpacked_entry_size, GFP_KERNEL);
528         if (!table->entries)
529                 return -ENOMEM;
530
531         table->entry_count = table->ops->max_entry_count;
532
533         /* This table only has a single entry */
534         l2fwd_params = table->entries;
535
536         /* Disallow dynamic reconfiguration of vlan_pmap */
537         l2fwd_params->max_dynp = 0;
538         /* Use a single memory partition for all ingress queues */
539         l2fwd_params->part_spc[0] = priv->info->max_frame_mem;
540
541         return 0;
542 }
543
544 void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
545 {
546         struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
547         struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
548         int max_mem = priv->info->max_frame_mem;
549         struct sja1105_table *table;
550
551         /* VLAN retagging is implemented using a loopback port that consumes
552          * frame buffers. That leaves less for us.
553          */
554         if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
555                 max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
556
557         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
558         l2_fwd_params = table->entries;
559         l2_fwd_params->part_spc[0] = max_mem;
560
561         /* If we have any critical-traffic virtual links, we need to reserve
562          * some frame buffer memory for them. At the moment, hardcode the value
563          * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
564          * remaining for best-effort traffic. TODO: figure out a more flexible
565          * way to perform the frame buffer partitioning.
566          */
567         if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
568                 return;
569
570         table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
571         vl_fwd_params = table->entries;
572
573         l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
574         vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
575 }
576
577 /* SJA1110 TDMACONFIGIDX values:
578  *
579  *      | 100 Mbps ports |  1Gbps ports  | 2.5Gbps ports | Disabled ports
580  * -----+----------------+---------------+---------------+---------------
581  *   0  |   0, [5:10]    |     [1:2]     |     [3:4]     |     retag
582  *   1  |0, [5:10], retag|     [1:2]     |     [3:4]     |       -
583  *   2  |   0, [5:10]    |  [1:3], retag |       4       |       -
584  *   3  |   0, [5:10]    |[1:2], 4, retag|       3       |       -
585  *   4  |  0, 2, [5:10]  |    1, retag   |     [3:4]     |       -
586  *   5  |  0, 1, [5:10]  |    2, retag   |     [3:4]     |       -
587  *  14  |   0, [5:10]    | [1:4], retag  |       -       |       -
588  *  15  |     [5:10]     | [0:4], retag  |       -       |       -
589  */
590 static void sja1110_select_tdmaconfigidx(struct sja1105_private *priv)
591 {
592         struct sja1105_general_params_entry *general_params;
593         struct sja1105_table *table;
594         bool port_1_is_base_tx;
595         bool port_3_is_2500;
596         bool port_4_is_2500;
597         u64 tdmaconfigidx;
598
599         if (priv->info->device_id != SJA1110_DEVICE_ID)
600                 return;
601
602         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
603         general_params = table->entries;
604
605         /* All the settings below are "as opposed to SGMII", which is the
606          * other pinmuxing option.
607          */
608         port_1_is_base_tx = priv->phy_mode[1] == PHY_INTERFACE_MODE_INTERNAL;
609         port_3_is_2500 = priv->phy_mode[3] == PHY_INTERFACE_MODE_2500BASEX;
610         port_4_is_2500 = priv->phy_mode[4] == PHY_INTERFACE_MODE_2500BASEX;
611
612         if (port_1_is_base_tx)
613                 /* Retagging port will operate at 1 Gbps */
614                 tdmaconfigidx = 5;
615         else if (port_3_is_2500 && port_4_is_2500)
616                 /* Retagging port will operate at 100 Mbps */
617                 tdmaconfigidx = 1;
618         else if (port_3_is_2500)
619                 /* Retagging port will operate at 1 Gbps */
620                 tdmaconfigidx = 3;
621         else if (port_4_is_2500)
622                 /* Retagging port will operate at 1 Gbps */
623                 tdmaconfigidx = 2;
624         else
625                 /* Retagging port will operate at 1 Gbps */
626                 tdmaconfigidx = 14;
627
628         general_params->tdmaconfigidx = tdmaconfigidx;
629 }
630
631 static int sja1105_init_general_params(struct sja1105_private *priv)
632 {
633         struct sja1105_general_params_entry default_general_params = {
634                 /* Allow dynamic changing of the mirror port */
635                 .mirr_ptacu = true,
636                 .switchid = priv->ds->index,
637                 /* Priority queue for link-local management frames
638                  * (both ingress to and egress from CPU - PTP, STP etc)
639                  */
640                 .hostprio = 7,
641                 .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
642                 .mac_flt1    = SJA1105_LINKLOCAL_FILTER_A_MASK,
643                 .incl_srcpt1 = false,
644                 .send_meta1  = false,
645                 .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
646                 .mac_flt0    = SJA1105_LINKLOCAL_FILTER_B_MASK,
647                 .incl_srcpt0 = false,
648                 .send_meta0  = false,
649                 /* The destination for traffic matching mac_fltres1 and
650                  * mac_fltres0 on all ports except host_port. Such traffic
651                  * received on host_port itself would be dropped, except
652                  * by installing a temporary 'management route'
653                  */
654                 .host_port = priv->ds->num_ports,
655                 /* Default to an invalid value */
656                 .mirr_port = priv->ds->num_ports,
657                 /* No TTEthernet */
658                 .vllupformat = SJA1105_VL_FORMAT_PSFP,
659                 .vlmarker = 0,
660                 .vlmask = 0,
661                 /* Only update correctionField for 1-step PTP (L2 transport) */
662                 .ignore2stf = 0,
663                 /* Forcefully disable VLAN filtering by telling
664                  * the switch that VLAN has a different EtherType.
665                  */
666                 .tpid = ETH_P_SJA1105,
667                 .tpid2 = ETH_P_SJA1105,
668                 /* Enable the TTEthernet engine on SJA1110 */
669                 .tte_en = true,
670                 /* Set up the EtherType for control packets on SJA1110 */
671                 .header_type = ETH_P_SJA1110,
672         };
673         struct sja1105_general_params_entry *general_params;
674         struct dsa_switch *ds = priv->ds;
675         struct sja1105_table *table;
676         int port;
677
678         for (port = 0; port < ds->num_ports; port++) {
679                 if (dsa_is_cpu_port(ds, port)) {
680                         default_general_params.host_port = port;
681                         break;
682                 }
683         }
684
685         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
686
687         if (table->entry_count) {
688                 kfree(table->entries);
689                 table->entry_count = 0;
690         }
691
692         table->entries = kcalloc(table->ops->max_entry_count,
693                                  table->ops->unpacked_entry_size, GFP_KERNEL);
694         if (!table->entries)
695                 return -ENOMEM;
696
697         table->entry_count = table->ops->max_entry_count;
698
699         general_params = table->entries;
700
701         /* This table only has a single entry */
702         general_params[0] = default_general_params;
703
704         sja1110_select_tdmaconfigidx(priv);
705
706         /* Link-local traffic received on casc_port will be forwarded
707          * to host_port without embedding the source port and device ID
708          * info in the destination MAC address, and no RX timestamps will be
709          * taken either (presumably because it is a cascaded port and a
710          * downstream SJA switch already did that).
711          * To disable the feature, we need to do different things depending on
712          * switch generation. On SJA1105 we need to set an invalid port, while
713                  * on SJA1110, which supports multiple cascaded ports, this field is a
714          * bitmask so it must be left zero.
715          */
716         if (!priv->info->multiple_cascade_ports)
717                 general_params->casc_port = ds->num_ports;
718
719         return 0;
720 }
721
722 static int sja1105_init_avb_params(struct sja1105_private *priv)
723 {
724         struct sja1105_avb_params_entry *avb;
725         struct sja1105_table *table;
726
727         table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
728
729         /* Discard previous AVB Parameters Table */
730         if (table->entry_count) {
731                 kfree(table->entries);
732                 table->entry_count = 0;
733         }
734
735         table->entries = kcalloc(table->ops->max_entry_count,
736                                  table->ops->unpacked_entry_size, GFP_KERNEL);
737         if (!table->entries)
738                 return -ENOMEM;
739
740         table->entry_count = table->ops->max_entry_count;
741
742         avb = table->entries;
743
744         /* Configure the MAC addresses for meta frames */
745         avb->destmeta = SJA1105_META_DMAC;
746         avb->srcmeta  = SJA1105_META_SMAC;
747         /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
748          * default. This is because there might be boards with a hardware
749          * layout where enabling the pin as output might cause an electrical
750          * clash. On E/T the pin is always an output, which the board designers
751          * probably already knew, so even if there are going to be electrical
752          * issues, there's nothing we can do.
753          */
754         avb->cas_master = false;
755
756         return 0;
757 }
758
759 /* The L2 policing table is 2-stage. The table is looked up for each frame
760  * according to the ingress port, whether it was broadcast or not, and the
761  * classified traffic class (given by VLAN PCP). This portion of the lookup is
762  * fixed, and gives access to the SHARINDX, an indirection register pointing
763  * within the policing table itself, which is used to resolve the policer that
764  * will be used for this frame.
765  *
766  *  Stage 1                              Stage 2
767  * +------------+--------+              +---------------------------------+
768  * |Port 0 TC 0 |SHARINDX|              | Policer 0: Rate, Burst, MTU     |
769  * +------------+--------+              +---------------------------------+
770  * |Port 0 TC 1 |SHARINDX|              | Policer 1: Rate, Burst, MTU     |
771  * +------------+--------+              +---------------------------------+
772  *    ...                               | Policer 2: Rate, Burst, MTU     |
773  * +------------+--------+              +---------------------------------+
774  * |Port 0 TC 7 |SHARINDX|              | Policer 3: Rate, Burst, MTU     |
775  * +------------+--------+              +---------------------------------+
776  * |Port 1 TC 0 |SHARINDX|              | Policer 4: Rate, Burst, MTU     |
777  * +------------+--------+              +---------------------------------+
778  *    ...                               | Policer 5: Rate, Burst, MTU     |
779  * +------------+--------+              +---------------------------------+
780  * |Port 1 TC 7 |SHARINDX|              | Policer 6: Rate, Burst, MTU     |
781  * +------------+--------+              +---------------------------------+
782  *    ...                               | Policer 7: Rate, Burst, MTU     |
783  * +------------+--------+              +---------------------------------+
784  * |Port 4 TC 7 |SHARINDX|                 ...
785  * +------------+--------+
786  * |Port 0 BCAST|SHARINDX|                 ...
787  * +------------+--------+
788  * |Port 1 BCAST|SHARINDX|                 ...
789  * +------------+--------+
790  *    ...                                  ...
791  * +------------+--------+              +---------------------------------+
792  * |Port 4 BCAST|SHARINDX|              | Policer 44: Rate, Burst, MTU    |
793  * +------------+--------+              +---------------------------------+
794  *
795  * In this driver, we shall use policers 0-4 as statically allocated port
796  * (matchall) policers. So we need to make the SHARINDX for all lookups
797  * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
798  * lookup) equal.
799  * The remaining policers (40) shall be dynamically allocated for flower
800  * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
801  */
802 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
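/* A worked example: the macro reduces to speed * 64, so one hardware RATE
 * unit appears to correspond to 1/64 Mbps (15.625 kbps), and
 * SJA1105_RATE_MBPS(1000) = 64000 programs the matchall policers below at
 * full gigabit line rate.
 */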
803
804 static int sja1105_init_l2_policing(struct sja1105_private *priv)
805 {
806         struct sja1105_l2_policing_entry *policing;
807         struct dsa_switch *ds = priv->ds;
808         struct sja1105_table *table;
809         int port, tc;
810
811         table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
812
813         /* Discard previous L2 Policing Table */
814         if (table->entry_count) {
815                 kfree(table->entries);
816                 table->entry_count = 0;
817         }
818
819         table->entries = kcalloc(table->ops->max_entry_count,
820                                  table->ops->unpacked_entry_size, GFP_KERNEL);
821         if (!table->entries)
822                 return -ENOMEM;
823
824         table->entry_count = table->ops->max_entry_count;
825
826         policing = table->entries;
827
828         /* Setup shared indices for the matchall policers */
829         for (port = 0; port < ds->num_ports; port++) {
830                 int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port;
831                 int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
832
833                 for (tc = 0; tc < SJA1105_NUM_TC; tc++)
834                         policing[port * SJA1105_NUM_TC + tc].sharindx = port;
835
836                 policing[bcast].sharindx = port;
837                 /* Only SJA1110 has multicast policers */
838                 if (mcast < table->ops->max_entry_count)
839                         policing[mcast].sharindx = port;
840         }
841
842         /* Setup the matchall policer parameters */
843         for (port = 0; port < ds->num_ports; port++) {
844                 int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
845
846                 if (dsa_is_cpu_port(priv->ds, port))
847                         mtu += VLAN_HLEN;
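                /* i.e. 1518 + 4 = 1522 bytes on user ports, and 1526 bytes on
                 * the CPU port, presumably to leave room for the VLAN header
                 * used by the tag_8021q-based DSA tagger.
                 */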
848
849                 policing[port].smax = 65535; /* Burst size in bytes */
850                 policing[port].rate = SJA1105_RATE_MBPS(1000);
851                 policing[port].maxlen = mtu;
852                 policing[port].partition = 0;
853         }
854
855         return 0;
856 }
857
858 static int sja1105_static_config_load(struct sja1105_private *priv)
859 {
860         int rc;
861
862         sja1105_static_config_free(&priv->static_config);
863         rc = sja1105_static_config_init(&priv->static_config,
864                                         priv->info->static_ops,
865                                         priv->info->device_id);
866         if (rc)
867                 return rc;
868
869         /* Build static configuration */
870         rc = sja1105_init_mac_settings(priv);
871         if (rc < 0)
872                 return rc;
873         rc = sja1105_init_mii_settings(priv);
874         if (rc < 0)
875                 return rc;
876         rc = sja1105_init_static_fdb(priv);
877         if (rc < 0)
878                 return rc;
879         rc = sja1105_init_static_vlan(priv);
880         if (rc < 0)
881                 return rc;
882         rc = sja1105_init_l2_lookup_params(priv);
883         if (rc < 0)
884                 return rc;
885         rc = sja1105_init_l2_forwarding(priv);
886         if (rc < 0)
887                 return rc;
888         rc = sja1105_init_l2_forwarding_params(priv);
889         if (rc < 0)
890                 return rc;
891         rc = sja1105_init_l2_policing(priv);
892         if (rc < 0)
893                 return rc;
894         rc = sja1105_init_general_params(priv);
895         if (rc < 0)
896                 return rc;
897         rc = sja1105_init_avb_params(priv);
898         if (rc < 0)
899                 return rc;
900         rc = sja1110_init_pcp_remapping(priv);
901         if (rc < 0)
902                 return rc;
903
904         /* Send initial configuration to hardware via SPI */
905         return sja1105_static_config_upload(priv);
906 }
907
908 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv)
909 {
910         struct dsa_switch *ds = priv->ds;
911         int port;
912
913         for (port = 0; port < ds->num_ports; port++) {
914                 if (!priv->fixed_link[port])
915                         continue;
916
917                 if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID ||
918                     priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
919                         priv->rgmii_rx_delay[port] = true;
920
921                 if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID ||
922                     priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
923                         priv->rgmii_tx_delay[port] = true;
924
925                 if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) &&
926                     !priv->info->setup_rgmii_delay)
927                         return -EINVAL;
928         }
929         return 0;
930 }
931
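/* For illustration, a port node as consumed by the parsing below (phy0 being
 * a placeholder label for the attached PHY):
 *
 *	port@0 {
 *		reg = <0>;
 *		phy-mode = "rgmii-id";
 *		phy-handle = <&phy0>;
 *	};
 *
 * Ports without a phy-handle are expected to describe a fixed-link instead.
 */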
932 static int sja1105_parse_ports_node(struct sja1105_private *priv,
933                                     struct device_node *ports_node)
934 {
935         struct device *dev = &priv->spidev->dev;
936         struct device_node *child;
937
938         for_each_available_child_of_node(ports_node, child) {
939                 struct device_node *phy_node;
940                 phy_interface_t phy_mode;
941                 u32 index;
942                 int err;
943
944                 /* Get switch port number from DT */
945                 if (of_property_read_u32(child, "reg", &index) < 0) {
946                         dev_err(dev, "Port number not defined in device tree "
947                                 "(property \"reg\")\n");
948                         of_node_put(child);
949                         return -ENODEV;
950                 }
951
952                 /* Get PHY mode from DT */
953                 err = of_get_phy_mode(child, &phy_mode);
954                 if (err) {
955                         dev_err(dev, "Failed to read phy-mode or "
956                                 "phy-interface-type property for port %d\n",
957                                 index);
958                         of_node_put(child);
959                         return -ENODEV;
960                 }
961
962                 phy_node = of_parse_phandle(child, "phy-handle", 0);
963                 if (!phy_node) {
964                         if (!of_phy_is_fixed_link(child)) {
965                                 dev_err(dev, "phy-handle or fixed-link "
966                                         "properties missing!\n");
967                                 of_node_put(child);
968                                 return -ENODEV;
969                         }
970                         /* phy-handle is missing, but fixed-link isn't.
971                          * So it's a fixed link. Default to PHY role.
972                          */
973                         priv->fixed_link[index] = true;
974                 } else {
975                         of_node_put(phy_node);
976                 }
977
978                 priv->phy_mode[index] = phy_mode;
979         }
980
981         return 0;
982 }
983
984 static int sja1105_parse_dt(struct sja1105_private *priv)
985 {
986         struct device *dev = &priv->spidev->dev;
987         struct device_node *switch_node = dev->of_node;
988         struct device_node *ports_node;
989         int rc;
990
991         ports_node = of_get_child_by_name(switch_node, "ports");
992         if (!ports_node)
993                 ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
994         if (!ports_node) {
995                 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
996                 return -ENODEV;
997         }
998
999         rc = sja1105_parse_ports_node(priv, ports_node);
1000         of_node_put(ports_node);
1001
1002         return rc;
1003 }
1004
1005 /* Convert link speed from SJA1105 to ethtool encoding */
1006 static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv,
1007                                          u64 speed)
1008 {
1009         if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS])
1010                 return SPEED_10;
1011         if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS])
1012                 return SPEED_100;
1013         if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS])
1014                 return SPEED_1000;
1015         if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS])
1016                 return SPEED_2500;
1017         return SPEED_UNKNOWN;
1018 }
1019
1020 /* Set link speed in the MAC configuration for a specific port. */
1021 static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
1022                                       int speed_mbps)
1023 {
1024         struct sja1105_mac_config_entry *mac;
1025         struct device *dev = priv->ds->dev;
1026         u64 speed;
1027         int rc;
1028
1029         /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
1030          * tables. On E/T, MAC reconfig tables are not readable, only writable.
1031          * We have to *know* what the MAC looks like.  For the sake of keeping
1032          * the code common, we'll use the static configuration tables as a
1033          * reasonable approximation for both E/T and P/Q/R/S.
1034          */
1035         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1036
1037         switch (speed_mbps) {
1038         case SPEED_UNKNOWN:
1039                 /* PHYLINK called sja1105_mac_config() to inform us about
1040                  * the state->interface, but AN has not completed and the
1041                  * speed is not yet valid. UM10944.pdf says that setting
1042                  * SJA1105_SPEED_AUTO at runtime disables the port, so that is
1043                  * ok for power consumption in case AN will never complete -
1044                  * otherwise PHYLINK should come back with a new update.
1045                  */
1046                 speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
1047                 break;
1048         case SPEED_10:
1049                 speed = priv->info->port_speed[SJA1105_SPEED_10MBPS];
1050                 break;
1051         case SPEED_100:
1052                 speed = priv->info->port_speed[SJA1105_SPEED_100MBPS];
1053                 break;
1054         case SPEED_1000:
1055                 speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
1056                 break;
1057         default:
1058                 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
1059                 return -EINVAL;
1060         }
1061
1062         /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
1063          * table, since this will be used for the clocking setup, and we no
1064          * longer need to store it in the static config (already told hardware
1065          * we want auto during upload phase).
1066          * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
1067          * we need to configure the PCS only (if even that).
1068          */
1069         if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
1070                 mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
1071         else
1072                 mac[port].speed = speed;
1073
1074         /* Write to the dynamic reconfiguration tables */
1075         rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1076                                           &mac[port], true);
1077         if (rc < 0) {
1078                 dev_err(dev, "Failed to write MAC config: %d\n", rc);
1079                 return rc;
1080         }
1081
1082         /* Reconfigure the PLLs for the RGMII interfaces (125 MHz is required
1083          * at gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
1084          * RMII no change of the clock setup is required. Actually, changing
1085          * the clock setup does interrupt the clock signal for a certain time
1086          * which causes trouble for all PHYs relying on this signal.
1087          */
1088         if (!phy_interface_mode_is_rgmii(priv->phy_mode[port]))
1089                 return 0;
1090
1091         return sja1105_clocking_setup_port(priv, port);
1092 }
1093
1094 /* The SJA1105 MAC programming model is through the static config (the xMII
1095  * Mode table cannot be dynamically reconfigured), and we have to program
1096  * that early (earlier than PHYLINK calls us, anyway).
1097  * So just error out in case the connected PHY attempts to change the initial
1098  * system interface MII protocol from what is defined in the DT, at least for
1099  * now.
1100  */
1101 static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
1102                                       phy_interface_t interface)
1103 {
1104         return priv->phy_mode[port] != interface;
1105 }
1106
1107 static void sja1105_mac_config(struct dsa_switch *ds, int port,
1108                                unsigned int mode,
1109                                const struct phylink_link_state *state)
1110 {
1111         struct dsa_port *dp = dsa_to_port(ds, port);
1112         struct sja1105_private *priv = ds->priv;
1113         struct dw_xpcs *xpcs;
1114
1115         if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
1116                 dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
1117                         phy_modes(state->interface));
1118                 return;
1119         }
1120
1121         xpcs = priv->xpcs[port];
1122
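        /* Only ports configured for SGMII or 2500base-X are expected to have
         * an XPCS instance; handing it to phylink lets the common pcs-xpcs
         * driver program the PCS rather than this driver doing it by hand.
         */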
1123         if (xpcs)
1124                 phylink_set_pcs(dp->pl, &xpcs->pcs);
1125 }
1126
1127 static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
1128                                   unsigned int mode,
1129                                   phy_interface_t interface)
1130 {
1131         sja1105_inhibit_tx(ds->priv, BIT(port), true);
1132 }
1133
1134 static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
1135                                 unsigned int mode,
1136                                 phy_interface_t interface,
1137                                 struct phy_device *phydev,
1138                                 int speed, int duplex,
1139                                 bool tx_pause, bool rx_pause)
1140 {
1141         struct sja1105_private *priv = ds->priv;
1142
1143         sja1105_adjust_port_config(priv, port, speed);
1144
1145         sja1105_inhibit_tx(priv, BIT(port), false);
1146 }
1147
1148 static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
1149                                      unsigned long *supported,
1150                                      struct phylink_link_state *state)
1151 {
1152         /* Construct a new mask which exhaustively contains all link features
1153          * supported by the MAC, and then apply that (logical AND) to what will
1154          * be sent to the PHY for "marketing".
1155          */
1156         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1157         struct sja1105_private *priv = ds->priv;
1158         struct sja1105_xmii_params_entry *mii;
1159
1160         mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
1161
1162         /* include/linux/phylink.h says:
1163          *     When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
1164          *     expects the MAC driver to return all supported link modes.
1165          */
1166         if (state->interface != PHY_INTERFACE_MODE_NA &&
1167             sja1105_phy_mode_mismatch(priv, port, state->interface)) {
1168                 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1169                 return;
1170         }
1171
1172         /* The MAC does not support pause frames, and also doesn't
1173          * support half-duplex traffic modes.
1174          */
1175         phylink_set(mask, Autoneg);
1176         phylink_set(mask, MII);
1177         phylink_set(mask, 10baseT_Full);
1178         phylink_set(mask, 100baseT_Full);
1179         phylink_set(mask, 100baseT1_Full);
1180         if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
1181             mii->xmii_mode[port] == XMII_MODE_SGMII)
1182                 phylink_set(mask, 1000baseT_Full);
1183
1184         bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
1185         bitmap_and(state->advertising, state->advertising, mask,
1186                    __ETHTOOL_LINK_MODE_MASK_NBITS);
1187 }
1188
1189 static int
1190 sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
1191                               const struct sja1105_l2_lookup_entry *requested)
1192 {
1193         struct sja1105_l2_lookup_entry *l2_lookup;
1194         struct sja1105_table *table;
1195         int i;
1196
1197         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1198         l2_lookup = table->entries;
1199
1200         for (i = 0; i < table->entry_count; i++)
1201                 if (l2_lookup[i].macaddr == requested->macaddr &&
1202                     l2_lookup[i].vlanid == requested->vlanid &&
1203                     l2_lookup[i].destports & BIT(port))
1204                         return i;
1205
1206         return -1;
1207 }
1208
1209 /* We want FDB entries added statically through the bridge command to persist
1210  * across switch resets, which are a common thing during normal SJA1105
1211  * operation. So we have to back them up in the static configuration tables
1212  * and hence apply them on next static config upload... yay!
1213  */
1214 static int
1215 sja1105_static_fdb_change(struct sja1105_private *priv, int port,
1216                           const struct sja1105_l2_lookup_entry *requested,
1217                           bool keep)
1218 {
1219         struct sja1105_l2_lookup_entry *l2_lookup;
1220         struct sja1105_table *table;
1221         int rc, match;
1222
1223         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1224
1225         match = sja1105_find_static_fdb_entry(priv, port, requested);
1226         if (match < 0) {
1227                 /* Can't delete a missing entry. */
1228                 if (!keep)
1229                         return 0;
1230
1231                 /* No match => new entry */
1232                 rc = sja1105_table_resize(table, table->entry_count + 1);
1233                 if (rc)
1234                         return rc;
1235
1236                 match = table->entry_count - 1;
1237         }
1238
1239         /* Assign pointer after the resize (it may be new memory) */
1240         l2_lookup = table->entries;
1241
1242         /* We have a match.
1243          * If the job was to add this FDB entry, it's already done (mostly
1244          * anyway, since the port forwarding mask may have changed, in which
1245          * case we update it).
1246          * Otherwise we have to delete it.
1247          */
1248         if (keep) {
1249                 l2_lookup[match] = *requested;
1250                 return 0;
1251         }
1252
1253         /* To remove, the strategy is to overwrite the element with
1254          * the last one, and then reduce the array size by 1
1255          */
1256         l2_lookup[match] = l2_lookup[table->entry_count - 1];
1257         return sja1105_table_resize(table, table->entry_count - 1);
1258 }
1259
1260 /* First-generation switches have a 4-way set associative TCAM that
1261  * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
1262  * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
1263  * For the placement of a newly learnt FDB entry, the switch selects the bin
1264  * based on a hash function, and the way within that bin incrementally.
1265  */
1266 static int sja1105et_fdb_index(int bin, int way)
1267 {
1268         return bin * SJA1105ET_FDB_BIN_SIZE + way;
1269 }
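/* With SJA1105ET_FDB_BIN_SIZE = 4, the 1024-entry FDB is organized as 256
 * bins of 4 ways each; e.g. bin 5, way 2 maps to FDB index 5 * 4 + 2 = 22.
 */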
1270
1271 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
1272                                          const u8 *addr, u16 vid,
1273                                          struct sja1105_l2_lookup_entry *match,
1274                                          int *last_unused)
1275 {
1276         int way;
1277
1278         for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
1279                 struct sja1105_l2_lookup_entry l2_lookup = {0};
1280                 int index = sja1105et_fdb_index(bin, way);
1281
1282                 /* Skip unused entries, optionally recording the last
1283                  * unused way in *last_unused
1284                  */
1285                 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1286                                                 index, &l2_lookup)) {
1287                         if (last_unused)
1288                                 *last_unused = way;
1289                         continue;
1290                 }
1291
1292                 if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
1293                     l2_lookup.vlanid == vid) {
1294                         if (match)
1295                                 *match = l2_lookup;
1296                         return way;
1297                 }
1298         }
1299         /* Return an invalid entry index if not found */
1300         return -1;
1301 }
1302
1303 int sja1105et_fdb_add(struct dsa_switch *ds, int port,
1304                       const unsigned char *addr, u16 vid)
1305 {
1306         struct sja1105_l2_lookup_entry l2_lookup = {0};
1307         struct sja1105_private *priv = ds->priv;
1308         struct device *dev = ds->dev;
1309         int last_unused = -1;
1310         int bin, way, rc;
1311
1312         bin = sja1105et_fdb_hash(priv, addr, vid);
1313
1314         way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1315                                             &l2_lookup, &last_unused);
1316         if (way >= 0) {
1317                 /* We have an FDB entry. Is our port in the destination
1318                  * mask? If yes, we need to do nothing. If not, we need
1319                  * to rewrite the entry by adding this port to it.
1320                  */
1321                 if (l2_lookup.destports & BIT(port))
1322                         return 0;
1323                 l2_lookup.destports |= BIT(port);
1324         } else {
1325                 int index = sja1105et_fdb_index(bin, way);
1326
1327                 /* We don't have an FDB entry. We construct a new one and
1328                  * try to find a place for it within the FDB table.
1329                  */
1330                 l2_lookup.macaddr = ether_addr_to_u64(addr);
1331                 l2_lookup.destports = BIT(port);
1332                 l2_lookup.vlanid = vid;
1333
1334                 if (last_unused >= 0) {
1335                         way = last_unused;
1336                 } else {
1337                         /* Bin is full, need to evict somebody.
1338                          * Choose victim at random. If you get these messages
1339                          * often, you may need to consider changing the
1340                          * distribution function:
1341                          * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
1342                          */
1343                         get_random_bytes(&way, sizeof(way));
1344                         way = (u8)way % SJA1105ET_FDB_BIN_SIZE;
1345                         dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
1346                                  bin, addr, way);
1347                         /* Evict the victim entry */
1348                         index = sja1105et_fdb_index(bin, way);
1349                         sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP, index, NULL, false);
1350                 }
1351         }
1352         l2_lookup.index = sja1105et_fdb_index(bin, way);
1353
1354         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1355                                           l2_lookup.index, &l2_lookup,
1356                                           true);
1357         if (rc < 0)
1358                 return rc;
1359
1360         return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1361 }
1362
1363 int sja1105et_fdb_del(struct dsa_switch *ds, int port,
1364                       const unsigned char *addr, u16 vid)
1365 {
1366         struct sja1105_l2_lookup_entry l2_lookup = {0};
1367         struct sja1105_private *priv = ds->priv;
1368         int index, bin, way, rc;
1369         bool keep;
1370
1371         bin = sja1105et_fdb_hash(priv, addr, vid);
1372         way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1373                                             &l2_lookup, NULL);
1374         if (way < 0)
1375                 return 0;
1376         index = sja1105et_fdb_index(bin, way);
1377
1378         /* We have an FDB entry. Is our port in the destination mask? If yes,
1379          * we need to remove it. If the resulting port mask becomes empty, we
1380          * need to completely evict the FDB entry.
1381          * Otherwise we just write it back.
1382          */
1383         l2_lookup.destports &= ~BIT(port);
1384
1385         if (l2_lookup.destports)
1386                 keep = true;
1387         else
1388                 keep = false;
1389
1390         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1391                                           index, &l2_lookup, keep);
1392         if (rc < 0)
1393                 return rc;
1394
1395         return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1396 }
1397
1398 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
1399                         const unsigned char *addr, u16 vid)
1400 {
1401         struct sja1105_l2_lookup_entry l2_lookup = {0};
1402         struct sja1105_private *priv = ds->priv;
1403         int rc, i;
1404
1405         /* Search for an existing entry in the FDB table */
1406         l2_lookup.macaddr = ether_addr_to_u64(addr);
1407         l2_lookup.vlanid = vid;
1408         l2_lookup.iotag = SJA1105_S_TAG;
1409         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1410         if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
1411                 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1412                 l2_lookup.mask_iotag = BIT(0);
1413         } else {
1414                 l2_lookup.mask_vlanid = 0;
1415                 l2_lookup.mask_iotag = 0;
1416         }
1417         l2_lookup.destports = BIT(port);
1418
1419         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1420                                          SJA1105_SEARCH, &l2_lookup);
1421         if (rc == 0) {
1422                 /* Found and this port is already in the entry's
1423                  * port mask => job done
1424                  */
1425                 if (l2_lookup.destports & BIT(port))
1426                         return 0;
1427                 /* l2_lookup.index is populated by the switch in case it
1428                  * found something.
1429                  */
1430                 l2_lookup.destports |= BIT(port);
1431                 goto skip_finding_an_index;
1432         }
1433
1434         /* Not found, so try to find an unused spot in the FDB.
1435          * This is slightly inefficient because the strategy is to probe
1436          * every possible position from 0 to 1023.
1437          */
1438         for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1439                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1440                                                  i, NULL);
1441                 if (rc < 0)
1442                         break;
1443         }
1444         if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
1445                 dev_err(ds->dev, "FDB is full, cannot add entry.\n");
1446                 return -EINVAL;
1447         }
1448         l2_lookup.lockeds = true;
1449         l2_lookup.index = i;
1450
1451 skip_finding_an_index:
1452         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1453                                           l2_lookup.index, &l2_lookup,
1454                                           true);
1455         if (rc < 0)
1456                 return rc;
1457
1458         return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1459 }
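
/* Note on the masked search above (descriptive note, not driver logic): the
 * switch is asked to find an entry whose MAC address matches all 48 bits of
 * the key and, when VLAN-aware, whose VID (all 12 bits) and IOTAG also match;
 * in VLAN-unaware mode the zero masks effectively wildcard the VID and IOTAG.
 */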
1460
1461 int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
1462                         const unsigned char *addr, u16 vid)
1463 {
1464         struct sja1105_l2_lookup_entry l2_lookup = {0};
1465         struct sja1105_private *priv = ds->priv;
1466         bool keep;
1467         int rc;
1468
1469         l2_lookup.macaddr = ether_addr_to_u64(addr);
1470         l2_lookup.vlanid = vid;
1471         l2_lookup.iotag = SJA1105_S_TAG;
1472         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1473         if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
1474                 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1475                 l2_lookup.mask_iotag = BIT(0);
1476         } else {
1477                 l2_lookup.mask_vlanid = 0;
1478                 l2_lookup.mask_iotag = 0;
1479         }
1480         l2_lookup.destports = BIT(port);
1481
1482         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1483                                          SJA1105_SEARCH, &l2_lookup);
1484         if (rc < 0)
1485                 return 0;
1486
1487         l2_lookup.destports &= ~BIT(port);
1488
1489         /* Decide whether we remove just this port from the FDB entry,
1490          * or if we remove it completely.
1491          */
1492         if (l2_lookup.destports)
1493                 keep = true;
1494         else
1495                 keep = false;
1496
1497         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1498                                           l2_lookup.index, &l2_lookup, keep);
1499         if (rc < 0)
1500                 return rc;
1501
1502         return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1503 }
1504
1505 static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1506                            const unsigned char *addr, u16 vid)
1507 {
1508         struct sja1105_private *priv = ds->priv;
1509
1510         /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
1511          * so the switch still does some VLAN processing internally.
1512          * But Shared VLAN Learning (SVL) is also active, and it will take
1513          * care of autonomous forwarding between the unique pvid's of each
1514          * port.  Here we just make sure that users can't add duplicate FDB
1515          * entries when in this mode - the actual VID doesn't matter except
1516          * for what gets printed in 'bridge fdb show'.  In the case of zero,
1517          * no VID gets printed at all.
1518          */
1519         if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1520                 vid = 0;
1521
1522         return priv->info->fdb_add_cmd(ds, port, addr, vid);
1523 }
1524
1525 static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1526                            const unsigned char *addr, u16 vid)
1527 {
1528         struct sja1105_private *priv = ds->priv;
1529
1530         if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1531                 vid = 0;
1532
1533         return priv->info->fdb_del_cmd(ds, port, addr, vid);
1534 }
1535
1536 static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1537                             dsa_fdb_dump_cb_t *cb, void *data)
1538 {
1539         struct sja1105_private *priv = ds->priv;
1540         struct device *dev = ds->dev;
1541         int i;
1542
1543         for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1544                 struct sja1105_l2_lookup_entry l2_lookup = {0};
1545                 u8 macaddr[ETH_ALEN];
1546                 int rc;
1547
1548                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1549                                                  i, &l2_lookup);
1550                 /* No fdb entry at i, not an issue */
1551                 if (rc == -ENOENT)
1552                         continue;
1553                 if (rc) {
1554                         dev_err(dev, "Failed to dump FDB: %d\n", rc);
1555                         return rc;
1556                 }
1557
1558                 /* FDB dump callback is per port. This means we have to
1559                  * disregard a valid entry if it's not for this port, even if
1560                  * only to revisit it later. This is inefficient because the
1561                  * 1024-sized FDB table needs to be traversed 4 times through
1562                  * SPI during a 'bridge fdb show' command.
1563                  */
1564                 if (!(l2_lookup.destports & BIT(port)))
1565                         continue;
1566
1567                 /* We need to hide the FDB entry for unknown multicast */
1568                 if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
1569                     l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
1570                         continue;
1571
1572                 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1573
1574                 /* We need to hide the dsa_8021q VLANs from the user. */
1575                 if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
1576                         l2_lookup.vlanid = 0;
1577                 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
1578         }
1579         return 0;
1580 }
1581
1582 static int sja1105_mdb_add(struct dsa_switch *ds, int port,
1583                            const struct switchdev_obj_port_mdb *mdb)
1584 {
1585         return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
1586 }
1587
1588 static int sja1105_mdb_del(struct dsa_switch *ds, int port,
1589                            const struct switchdev_obj_port_mdb *mdb)
1590 {
1591         return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
1592 }
1593
1594 /* Common function for unicast and broadcast flood configuration.
1595  * Flooding is configured between each {ingress, egress} port pair, and since
1596  * the bridge's semantics are those of "egress flooding", we must
1597  * enable flooding towards this port from all ingress ports that are in the
1598  * same forwarding domain.
1599  */
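/* Illustrative example (port numbers are hypothetical): if user ports 1 and 2
 * are in the same bridge and only port 2 is marked for unknown-unicast
 * flooding in ucast_egress_floods, then l2_fwd[1].fl_domain gains BIT(2) while
 * l2_fwd[2].fl_domain does not gain BIT(1), i.e. unknown unicast received on
 * port 1 is flooded towards port 2 but not the other way around.
 */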
1600 static int sja1105_manage_flood_domains(struct sja1105_private *priv)
1601 {
1602         struct sja1105_l2_forwarding_entry *l2_fwd;
1603         struct dsa_switch *ds = priv->ds;
1604         int from, to, rc;
1605
1606         l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
1607
1608         for (from = 0; from < ds->num_ports; from++) {
1609                 u64 fl_domain = 0, bc_domain = 0;
1610
1611                 for (to = 0; to < priv->ds->num_ports; to++) {
1612                         if (!sja1105_can_forward(l2_fwd, from, to))
1613                                 continue;
1614
1615                         if (priv->ucast_egress_floods & BIT(to))
1616                                 fl_domain |= BIT(to);
1617                         if (priv->bcast_egress_floods & BIT(to))
1618                                 bc_domain |= BIT(to);
1619                 }
1620
1621                 /* Nothing changed, nothing to do */
1622                 if (l2_fwd[from].fl_domain == fl_domain &&
1623                     l2_fwd[from].bc_domain == bc_domain)
1624                         continue;
1625
1626                 l2_fwd[from].fl_domain = fl_domain;
1627                 l2_fwd[from].bc_domain = bc_domain;
1628
1629                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1630                                                   from, &l2_fwd[from], true);
1631                 if (rc < 0)
1632                         return rc;
1633         }
1634
1635         return 0;
1636 }
1637
1638 static int sja1105_bridge_member(struct dsa_switch *ds, int port,
1639                                  struct net_device *br, bool member)
1640 {
1641         struct sja1105_l2_forwarding_entry *l2_fwd;
1642         struct sja1105_private *priv = ds->priv;
1643         int i, rc;
1644
1645         l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
1646
1647         for (i = 0; i < ds->num_ports; i++) {
1648                 /* Add this port to the forwarding matrix of the
1649                  * other ports in the same bridge, and vice versa.
1650                  */
1651                 if (!dsa_is_user_port(ds, i))
1652                         continue;
1653                 /* For the ports already under the bridge, only one thing needs
1654                  * to be done, and that is to add this port to their
1655                  * reachability domain. So we can perform the SPI write for
1656                  * them immediately. However, for this port itself (the one
1657                  * that is new to the bridge), we need to add all other ports
1658                  * to its reachability domain. So we do that incrementally in
1659                  * this loop, and perform the SPI write only at the end, once
1660                  * the domain contains all other bridge ports.
1661                  */
1662                 if (i == port)
1663                         continue;
1664                 if (dsa_to_port(ds, i)->bridge_dev != br)
1665                         continue;
1666                 sja1105_port_allow_traffic(l2_fwd, i, port, member);
1667                 sja1105_port_allow_traffic(l2_fwd, port, i, member);
1668
1669                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1670                                                   i, &l2_fwd[i], true);
1671                 if (rc < 0)
1672                         return rc;
1673         }
1674
1675         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1676                                           port, &l2_fwd[port], true);
1677         if (rc)
1678                 return rc;
1679
1680         return sja1105_manage_flood_domains(priv);
1681 }
1682
1683 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
1684                                          u8 state)
1685 {
1686         struct sja1105_private *priv = ds->priv;
1687         struct sja1105_mac_config_entry *mac;
1688
1689         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1690
1691         switch (state) {
1692         case BR_STATE_DISABLED:
1693         case BR_STATE_BLOCKING:
1694                 /* From UM10944 description of DRPDTAG (why put this there?):
1695                  * "Management traffic flows to the port regardless of the state
1696                  * of the INGRESS flag". So BPDUs are still allowed to pass.
1697                  * At the moment there is no difference between DISABLED and BLOCKING.
1698                  */
1699                 mac[port].ingress   = false;
1700                 mac[port].egress    = false;
1701                 mac[port].dyn_learn = false;
1702                 break;
1703         case BR_STATE_LISTENING:
1704                 mac[port].ingress   = true;
1705                 mac[port].egress    = false;
1706                 mac[port].dyn_learn = false;
1707                 break;
1708         case BR_STATE_LEARNING:
1709                 mac[port].ingress   = true;
1710                 mac[port].egress    = false;
1711                 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
1712                 break;
1713         case BR_STATE_FORWARDING:
1714                 mac[port].ingress   = true;
1715                 mac[port].egress    = true;
1716                 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
1717                 break;
1718         default:
1719                 dev_err(ds->dev, "invalid STP state: %d\n", state);
1720                 return;
1721         }
1722
1723         sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1724                                      &mac[port], true);
1725 }
1726
1727 static int sja1105_bridge_join(struct dsa_switch *ds, int port,
1728                                struct net_device *br)
1729 {
1730         return sja1105_bridge_member(ds, port, br, true);
1731 }
1732
1733 static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
1734                                  struct net_device *br)
1735 {
1736         sja1105_bridge_member(ds, port, br, false);
1737 }
1738
1739 #define BYTES_PER_KBIT (1000LL / 8)
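/* Worked example (numbers are illustrative): 1 kbit/s = 1000 bits/s = 125
 * bytes/s, so an idleslope of 20000 kbit/s requested through tc-cbs becomes
 * 20000 * 125 = 2500000 bytes/s when programmed into the hardware.
 */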
1740
1741 static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
1742 {
1743         int i;
1744
1745         for (i = 0; i < priv->info->num_cbs_shapers; i++)
1746                 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
1747                         return i;
1748
1749         return -1;
1750 }
1751
1752 static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
1753                                      int prio)
1754 {
1755         int i;
1756
1757         for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1758                 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1759
1760                 if (cbs->port == port && cbs->prio == prio) {
1761                         memset(cbs, 0, sizeof(*cbs));
1762                         return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
1763                                                             i, cbs, true);
1764                 }
1765         }
1766
1767         return 0;
1768 }
1769
1770 static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
1771                                 struct tc_cbs_qopt_offload *offload)
1772 {
1773         struct sja1105_private *priv = ds->priv;
1774         struct sja1105_cbs_entry *cbs;
1775         int index;
1776
1777         if (!offload->enable)
1778                 return sja1105_delete_cbs_shaper(priv, port, offload->queue);
1779
1780         index = sja1105_find_unused_cbs_shaper(priv);
1781         if (index < 0)
1782                 return -ENOSPC;
1783
1784         cbs = &priv->cbs[index];
1785         cbs->port = port;
1786         cbs->prio = offload->queue;
1787         /* locredit and sendslope are negative by definition. In hardware,
1788          * positive values must be provided, and the negative sign is implicit.
1789          */
1790         cbs->credit_hi = offload->hicredit;
1791         cbs->credit_lo = abs(offload->locredit);
1792         /* User space is in kbits/sec, hardware in bytes/sec */
1793         cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
1794         cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
1795         /* Convert the negative values from 64-bit 2's complement
1796          * to 32-bit 2's complement (for the case of 0x80000000 whose
1797          * negative is still negative).
1798          */
1799         cbs->credit_lo &= GENMASK_ULL(31, 0);
1800         cbs->send_slope &= GENMASK_ULL(31, 0);
1801
1802         return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
1803                                             true);
1804 }
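
/* Illustrative end-to-end example for the conversion above (all numbers are
 * hypothetical): a CBS offload with idleslope = 20000 kbit/s and
 * sendslope = -980000 kbit/s on a 1Gbps port would be programmed as
 * idle_slope = 2500000 bytes/s and send_slope = 122500000 bytes/s, with the
 * negative sign of sendslope (and of locredit) left implicit, as the hardware
 * expects.
 */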
1805
1806 static int sja1105_reload_cbs(struct sja1105_private *priv)
1807 {
1808         int rc = 0, i;
1809
1810         for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1811                 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1812
1813                 if (!cbs->idle_slope && !cbs->send_slope)
1814                         continue;
1815
1816                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
1817                                                   true);
1818                 if (rc)
1819                         break;
1820         }
1821
1822         return rc;
1823 }
1824
1825 static const char * const sja1105_reset_reasons[] = {
1826         [SJA1105_VLAN_FILTERING] = "VLAN filtering",
1827         [SJA1105_RX_HWTSTAMPING] = "RX timestamping",
1828         [SJA1105_AGEING_TIME] = "Ageing time",
1829         [SJA1105_SCHEDULING] = "Time-aware scheduling",
1830         [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
1831         [SJA1105_VIRTUAL_LINKS] = "Virtual links",
1832 };
1833
1834 /* For situations where we need to change a setting at runtime that is only
1835  * available through the static configuration, resetting the switch in order
1836  * to upload the new static config is unavoidable. Back up the settings we
1837  * modify at runtime (currently only MAC) and restore them after uploading,
1838  * such that this operation is relatively seamless.
1839  */
1840 int sja1105_static_config_reload(struct sja1105_private *priv,
1841                                  enum sja1105_reset_reason reason)
1842 {
1843         struct ptp_system_timestamp ptp_sts_before;
1844         struct ptp_system_timestamp ptp_sts_after;
1845         int speed_mbps[SJA1105_MAX_NUM_PORTS];
1846         u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0};
1847         struct sja1105_mac_config_entry *mac;
1848         struct dsa_switch *ds = priv->ds;
1849         s64 t1, t2, t3, t4;
1850         s64 t12, t34;
1851         int rc, i;
1852         s64 now;
1853
1854         mutex_lock(&priv->mgmt_lock);
1855
1856         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1857
1858         /* Back up the dynamic link speed changed by sja1105_adjust_port_config
1859          * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
1860          * switch wants to see in the static config in order to allow us to
1861          * change it through the dynamic interface later.
1862          */
1863         for (i = 0; i < ds->num_ports; i++) {
1864                 u32 reg_addr = mdiobus_c45_addr(MDIO_MMD_VEND2, MDIO_CTRL1);
1865
1866                 speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
1867                                                               mac[i].speed);
1868                 mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
1869
1870                 if (priv->xpcs[i])
1871                         bmcr[i] = mdiobus_read(priv->mdio_pcs, i, reg_addr);
1872         }
1873
1874         /* No PTP operations can run right now */
1875         mutex_lock(&priv->ptp_data.lock);
1876
1877         rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
1878         if (rc < 0)
1879                 goto out_unlock_ptp;
1880
1881         /* Reset switch and send updated static configuration */
1882         rc = sja1105_static_config_upload(priv);
1883         if (rc < 0)
1884                 goto out_unlock_ptp;
1885
1886         rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
1887         if (rc < 0)
1888                 goto out_unlock_ptp;
1889
1890         t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
1891         t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
1892         t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
1893         t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
1894         /* Mid point, corresponds to pre-reset PTPCLKVAL */
1895         t12 = t1 + (t2 - t1) / 2;
1896         /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
1897         t34 = t3 + (t4 - t3) / 2;
1898         /* Advance PTPCLKVAL by the time it took since its readout */
1899         now += (t34 - t12);
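        /* In other words, with t12 = (t1 + t2) / 2 being the system time at
         * which PTPCLKVAL was last read as "now", and t34 = (t3 + t4) / 2 the
         * system time at which it was written back as 0, the clock ends up
         * advanced by (t34 - t12) on top of its pre-reset value, which
         * compensates for the time spent uploading the static config.
         */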
1900
1901         __sja1105_ptp_adjtime(ds, now);
1902
1903 out_unlock_ptp:
1904         mutex_unlock(&priv->ptp_data.lock);
1905
1906         dev_info(priv->ds->dev,
1907                  "Reset switch and programmed static config. Reason: %s\n",
1908                  sja1105_reset_reasons[reason]);
1909
1910         /* Configure the CGU (PLLs) for MII and RMII PHYs.
1911          * For these interfaces there is no dynamic configuration
1912          * needed, since the PLLs have the same settings at all speeds.
1913          */
1914         rc = priv->info->clocking_setup(priv);
1915         if (rc < 0)
1916                 goto out;
1917
1918         for (i = 0; i < ds->num_ports; i++) {
1919                 struct dw_xpcs *xpcs = priv->xpcs[i];
1920                 unsigned int mode;
1921
1922                 rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
1923                 if (rc < 0)
1924                         goto out;
1925
1926                 if (!xpcs)
1927                         continue;
1928
1929                 if (bmcr[i] & BMCR_ANENABLE)
1930                         mode = MLO_AN_INBAND;
1931                 else if (priv->fixed_link[i])
1932                         mode = MLO_AN_FIXED;
1933                 else
1934                         mode = MLO_AN_PHY;
1935
1936                 rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode);
1937                 if (rc < 0)
1938                         goto out;
1939
1940                 if (!phylink_autoneg_inband(mode)) {
1941                         int speed = SPEED_UNKNOWN;
1942
1943                         if (bmcr[i] & BMCR_SPEED1000)
1944                                 speed = SPEED_1000;
1945                         else if (bmcr[i] & BMCR_SPEED100)
1946                                 speed = SPEED_100;
1947                         else
1948                                 speed = SPEED_10;
1949
1950                         xpcs_link_up(&xpcs->pcs, mode, priv->phy_mode[i],
1951                                      speed, DUPLEX_FULL);
1952                 }
1953         }
1954
1955         rc = sja1105_reload_cbs(priv);
1956         if (rc < 0)
1957                 goto out;
1958 out:
1959         mutex_unlock(&priv->mgmt_lock);
1960
1961         return rc;
1962 }
1963
1964 static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
1965 {
1966         struct sja1105_mac_config_entry *mac;
1967
1968         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1969
1970         mac[port].vlanid = pvid;
1971
1972         return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1973                                            &mac[port], true);
1974 }
1975
1976 static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
1977                                          int tree_index, int sw_index,
1978                                          int other_port, struct net_device *br)
1979 {
1980         struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
1981         struct sja1105_private *other_priv = other_ds->priv;
1982         struct sja1105_private *priv = ds->priv;
1983         int port, rc;
1984
1985         if (other_ds->ops != &sja1105_switch_ops)
1986                 return 0;
1987
1988         for (port = 0; port < ds->num_ports; port++) {
1989                 if (!dsa_is_user_port(ds, port))
1990                         continue;
1991                 if (dsa_to_port(ds, port)->bridge_dev != br)
1992                         continue;
1993
1994                 rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
1995                                                      port,
1996                                                      other_priv->dsa_8021q_ctx,
1997                                                      other_port);
1998                 if (rc)
1999                         return rc;
2000
2001                 rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
2002                                                      other_port,
2003                                                      priv->dsa_8021q_ctx,
2004                                                      port);
2005                 if (rc)
2006                         return rc;
2007         }
2008
2009         return 0;
2010 }
2011
2012 static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
2013                                            int tree_index, int sw_index,
2014                                            int other_port,
2015                                            struct net_device *br)
2016 {
2017         struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
2018         struct sja1105_private *other_priv = other_ds->priv;
2019         struct sja1105_private *priv = ds->priv;
2020         int port;
2021
2022         if (other_ds->ops != &sja1105_switch_ops)
2023                 return;
2024
2025         for (port = 0; port < ds->num_ports; port++) {
2026                 if (!dsa_is_user_port(ds, port))
2027                         continue;
2028                 if (dsa_to_port(ds, port)->bridge_dev != br)
2029                         continue;
2030
2031                 dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
2032                                                  other_priv->dsa_8021q_ctx,
2033                                                  other_port);
2034
2035                 dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
2036                                                  other_port,
2037                                                  priv->dsa_8021q_ctx, port);
2038         }
2039 }
2040
2041 static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
2042 {
2043         struct sja1105_private *priv = ds->priv;
2044         int rc;
2045
2046         rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
2047         if (rc)
2048                 return rc;
2049
2050         dev_info(ds->dev, "%s switch tagging\n",
2051                  enabled ? "Enabled" : "Disabled");
2052         return 0;
2053 }
2054
2055 static enum dsa_tag_protocol
2056 sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
2057                          enum dsa_tag_protocol mp)
2058 {
2059         struct sja1105_private *priv = ds->priv;
2060
2061         return priv->info->tag_proto;
2062 }
2063
2064 static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
2065 {
2066         int subvlan;
2067
2068         if (pvid)
2069                 return 0;
2070
2071         for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2072                 if (subvlan_map[subvlan] == VLAN_N_VID)
2073                         return subvlan;
2074
2075         return -1;
2076 }
2077
2078 static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
2079 {
2080         int subvlan;
2081
2082         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2083                 if (subvlan_map[subvlan] == vid)
2084                         return subvlan;
2085
2086         return -1;
2087 }
2088
2089 static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
2090                                           int port, u16 vid)
2091 {
2092         struct sja1105_port *sp = &priv->ports[port];
2093
2094         return sja1105_find_subvlan(sp->subvlan_map, vid);
2095 }
2096
2097 static void sja1105_init_subvlan_map(u16 *subvlan_map)
2098 {
2099         int subvlan;
2100
2101         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2102                 subvlan_map[subvlan] = VLAN_N_VID;
2103 }
2104
2105 static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
2106                                        u16 *subvlan_map)
2107 {
2108         struct sja1105_port *sp = &priv->ports[port];
2109         int subvlan;
2110
2111         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2112                 sp->subvlan_map[subvlan] = subvlan_map[subvlan];
2113 }
2114
2115 static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
2116 {
2117         struct sja1105_vlan_lookup_entry *vlan;
2118         int count, i;
2119
2120         vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
2121         count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
2122
2123         for (i = 0; i < count; i++)
2124                 if (vlan[i].vlanid == vid)
2125                         return i;
2126
2127         /* Return an invalid entry index if not found */
2128         return -1;
2129 }
2130
2131 static int
2132 sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
2133                              int count, int from_port, u16 from_vid,
2134                              u16 to_vid)
2135 {
2136         int i;
2137
2138         for (i = 0; i < count; i++)
2139                 if (retagging[i].ing_port == BIT(from_port) &&
2140                     retagging[i].vlan_ing == from_vid &&
2141                     retagging[i].vlan_egr == to_vid)
2142                         return i;
2143
2144         /* Return an invalid entry index if not found */
2145         return -1;
2146 }
2147
2148 static int sja1105_commit_vlans(struct sja1105_private *priv,
2149                                 struct sja1105_vlan_lookup_entry *new_vlan,
2150                                 struct sja1105_retagging_entry *new_retagging,
2151                                 int num_retagging)
2152 {
2153         struct sja1105_retagging_entry *retagging;
2154         struct sja1105_vlan_lookup_entry *vlan;
2155         struct sja1105_table *table;
2156         int num_vlans = 0;
2157         int rc, i, k = 0;
2158
2159         /* VLAN table */
2160         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2161         vlan = table->entries;
2162
2163         for (i = 0; i < VLAN_N_VID; i++) {
2164                 int match = sja1105_is_vlan_configured(priv, i);
2165
2166                 if (new_vlan[i].vlanid != VLAN_N_VID)
2167                         num_vlans++;
2168
2169                 if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
2170                         /* Was there before, no longer is. Delete */
2171                         dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
2172                         rc = sja1105_dynamic_config_write(priv,
2173                                                           BLK_IDX_VLAN_LOOKUP,
2174                                                           i, &vlan[match], false);
2175                         if (rc < 0)
2176                                 return rc;
2177                 } else if (new_vlan[i].vlanid != VLAN_N_VID) {
2178                         /* Nothing changed, don't do anything */
2179                         if (match >= 0 &&
2180                             vlan[match].vlanid == new_vlan[i].vlanid &&
2181                             vlan[match].tag_port == new_vlan[i].tag_port &&
2182                             vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
2183                             vlan[match].vmemb_port == new_vlan[i].vmemb_port)
2184                                 continue;
2185                         /* Update entry */
2186                         dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
2187                         rc = sja1105_dynamic_config_write(priv,
2188                                                           BLK_IDX_VLAN_LOOKUP,
2189                                                           i, &new_vlan[i],
2190                                                           true);
2191                         if (rc < 0)
2192                                 return rc;
2193                 }
2194         }
2195
2196         if (table->entry_count)
2197                 kfree(table->entries);
2198
2199         table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
2200                                  GFP_KERNEL);
2201         if (!table->entries)
2202                 return -ENOMEM;
2203
2204         table->entry_count = num_vlans;
2205         vlan = table->entries;
2206
2207         for (i = 0; i < VLAN_N_VID; i++) {
2208                 if (new_vlan[i].vlanid == VLAN_N_VID)
2209                         continue;
2210                 vlan[k++] = new_vlan[i];
2211         }
2212
2213         /* VLAN Retagging Table */
2214         table = &priv->static_config.tables[BLK_IDX_RETAGGING];
2215         retagging = table->entries;
2216
2217         for (i = 0; i < table->entry_count; i++) {
2218                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2219                                                   i, &retagging[i], false);
2220                 if (rc)
2221                         return rc;
2222         }
2223
2224         if (table->entry_count)
2225                 kfree(table->entries);
2226
2227         table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
2228                                  GFP_KERNEL);
2229         if (!table->entries)
2230                 return -ENOMEM;
2231
2232         table->entry_count = num_retagging;
2233         retagging = table->entries;
2234
2235         for (i = 0; i < num_retagging; i++) {
2236                 retagging[i] = new_retagging[i];
2237
2238                 /* Update entry */
2239                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2240                                                   i, &retagging[i], true);
2241                 if (rc < 0)
2242                         return rc;
2243         }
2244
2245         return 0;
2246 }
2247
2248 struct sja1105_crosschip_vlan {
2249         struct list_head list;
2250         u16 vid;
2251         bool untagged;
2252         int port;
2253         int other_port;
2254         struct dsa_8021q_context *other_ctx;
2255 };
2256
2257 struct sja1105_crosschip_switch {
2258         struct list_head list;
2259         struct dsa_8021q_context *other_ctx;
2260 };
2261
2262 static int sja1105_commit_pvid(struct sja1105_private *priv)
2263 {
2264         struct sja1105_bridge_vlan *v;
2265         struct list_head *vlan_list;
2266         int rc = 0;
2267
2268         if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2269                 vlan_list = &priv->bridge_vlans;
2270         else
2271                 vlan_list = &priv->dsa_8021q_vlans;
2272
2273         list_for_each_entry(v, vlan_list, list) {
2274                 if (v->pvid) {
2275                         rc = sja1105_pvid_apply(priv, v->port, v->vid);
2276                         if (rc)
2277                                 break;
2278                 }
2279         }
2280
2281         return rc;
2282 }
2283
2284 static int
2285 sja1105_build_bridge_vlans(struct sja1105_private *priv,
2286                            struct sja1105_vlan_lookup_entry *new_vlan)
2287 {
2288         struct sja1105_bridge_vlan *v;
2289
2290         if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
2291                 return 0;
2292
2293         list_for_each_entry(v, &priv->bridge_vlans, list) {
2294                 int match = v->vid;
2295
2296                 new_vlan[match].vlanid = v->vid;
2297                 new_vlan[match].vmemb_port |= BIT(v->port);
2298                 new_vlan[match].vlan_bc |= BIT(v->port);
2299                 if (!v->untagged)
2300                         new_vlan[match].tag_port |= BIT(v->port);
2301                 new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
2302         }
2303
2304         return 0;
2305 }
2306
2307 static int
2308 sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
2309                               struct sja1105_vlan_lookup_entry *new_vlan)
2310 {
2311         struct sja1105_bridge_vlan *v;
2312
2313         if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2314                 return 0;
2315
2316         list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
2317                 int match = v->vid;
2318
2319                 new_vlan[match].vlanid = v->vid;
2320                 new_vlan[match].vmemb_port |= BIT(v->port);
2321                 new_vlan[match].vlan_bc |= BIT(v->port);
2322                 if (!v->untagged)
2323                         new_vlan[match].tag_port |= BIT(v->port);
2324                 new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
2325         }
2326
2327         return 0;
2328 }
2329
2330 static int sja1105_build_subvlans(struct sja1105_private *priv,
2331                                   u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
2332                                   struct sja1105_vlan_lookup_entry *new_vlan,
2333                                   struct sja1105_retagging_entry *new_retagging,
2334                                   int *num_retagging)
2335 {
2336         struct sja1105_bridge_vlan *v;
2337         int k = *num_retagging;
2338
2339         if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2340                 return 0;
2341
2342         list_for_each_entry(v, &priv->bridge_vlans, list) {
2343                 int upstream = dsa_upstream_port(priv->ds, v->port);
2344                 int match, subvlan;
2345                 u16 rx_vid;
2346
2347                 /* Only sub-VLANs on user ports need to be applied.
2348                  * Bridge VLANs also include VLANs added automatically
2349                  * by DSA on the CPU port.
2350                  */
2351                 if (!dsa_is_user_port(priv->ds, v->port))
2352                         continue;
2353
2354                 subvlan = sja1105_find_subvlan(subvlan_map[v->port],
2355                                                v->vid);
2356                 if (subvlan < 0) {
2357                         subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
2358                                                             v->pvid);
2359                         if (subvlan < 0) {
2360                                 dev_err(priv->ds->dev, "No more free subvlans\n");
2361                                 return -ENOSPC;
2362                         }
2363                 }
2364
2365                 rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
2366
2367                 /* @v->vid on @v->port needs to be retagged to @rx_vid
2368                  * on @upstream. Assume @v->vid on @v->port and on
2369                  * @upstream was already configured by the previous
2370                  * iteration over bridge_vlans.
2371                  */
2372                 match = rx_vid;
2373                 new_vlan[match].vlanid = rx_vid;
2374                 new_vlan[match].vmemb_port |= BIT(v->port);
2375                 new_vlan[match].vmemb_port |= BIT(upstream);
2376                 new_vlan[match].vlan_bc |= BIT(v->port);
2377                 new_vlan[match].vlan_bc |= BIT(upstream);
2378                 /* The "untagged" flag is set the same as for the
2379                  * original VLAN
2380                  */
2381                 if (!v->untagged)
2382                         new_vlan[match].tag_port |= BIT(v->port);
2383                 /* But it's always tagged towards the CPU */
2384                 new_vlan[match].tag_port |= BIT(upstream);
2385                 new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
2386
2387                 /* The Retagging Table generates packet *clones* with
2388                  * the new VLAN. This is a very odd hardware quirk
2389                  * which we need to suppress by dropping the original
2390                  * packet.
2391                  * Deny egress of the original VLAN towards the CPU
2392                  * port. This will force the switch to drop it, and
2393                  * we'll see only the retagged packets.
2394                  */
2395                 match = v->vid;
2396                 new_vlan[match].vlan_bc &= ~BIT(upstream);
2397
2398                 /* And the retagging itself */
2399                 if (k == SJA1105_MAX_RETAGGING_COUNT) {
2400                         dev_err(priv->ds->dev, "No more retagging rules\n");
2401                         return -ENOSPC;
2402                 }
2403                 new_retagging[k].vlan_ing = v->vid;
2404                 new_retagging[k].vlan_egr = rx_vid;
2405                 new_retagging[k].ing_port = BIT(v->port);
2406                 new_retagging[k++].egr_port = BIT(upstream);
2407
2408                 subvlan_map[v->port][subvlan] = v->vid;
2409         }
2410
2411         *num_retagging = k;
2412
2413         return 0;
2414 }
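
/* Illustrative example for the retagging scheme above (all numbers are
 * hypothetical): for bridge VLAN 100 installed on user port 2, with port 4 as
 * the upstream (CPU) port, a retagging rule clones frames received on port 2
 * in VLAN 100 into the corresponding dsa_8021q rx_vid towards port 4, while
 * VLAN 100 itself is removed from the CPU port's broadcast domain so that only
 * the retagged clone reaches the CPU.
 */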
2415
2416 /* Sadly, in crosschip scenarios where the CPU port is also the link to another
2417  * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
2418  * the CPU port of neighbour switches.
2419  */
2420 static int
2421 sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
2422                                  struct sja1105_vlan_lookup_entry *new_vlan,
2423                                  struct sja1105_retagging_entry *new_retagging,
2424                                  int *num_retagging)
2425 {
2426         struct sja1105_crosschip_vlan *tmp, *pos;
2427         struct dsa_8021q_crosschip_link *c;
2428         struct sja1105_bridge_vlan *v, *w;
2429         struct list_head crosschip_vlans;
2430         int k = *num_retagging;
2431         int rc = 0;
2432
2433         if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2434                 return 0;
2435
2436         INIT_LIST_HEAD(&crosschip_vlans);
2437
2438         list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
2439                 struct sja1105_private *other_priv = c->other_ctx->ds->priv;
2440
2441                 if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2442                         continue;
2443
2444                 /* Crosschip links are also added to the CPU ports.
2445                  * Ignore those.
2446                  */
2447                 if (!dsa_is_user_port(priv->ds, c->port))
2448                         continue;
2449                 if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
2450                         continue;
2451
2452                 /* Search for VLANs on the remote port */
2453                 list_for_each_entry(v, &other_priv->bridge_vlans, list) {
2454                         bool already_added = false;
2455                         bool we_have_it = false;
2456
2457                         if (v->port != c->other_port)
2458                                 continue;
2459
2460                         /* If @v is a pvid on @other_ds, it does not need
2461                          * re-retagging, because its SVL field is 0 and we
2462                          * already allow that, via the dsa_8021q crosschip
2463                          * links.
2464                          */
2465                         if (v->pvid)
2466                                 continue;
2467
2468                         /* Search for the VLAN on our local port */
2469                         list_for_each_entry(w, &priv->bridge_vlans, list) {
2470                                 if (w->port == c->port && w->vid == v->vid) {
2471                                         we_have_it = true;
2472                                         break;
2473                                 }
2474                         }
2475
2476                         if (!we_have_it)
2477                                 continue;
2478
2479                         list_for_each_entry(tmp, &crosschip_vlans, list) {
2480                                 if (tmp->vid == v->vid &&
2481                                     tmp->untagged == v->untagged &&
2482                                     tmp->port == c->port &&
2483                                     tmp->other_port == v->port &&
2484                                     tmp->other_ctx == c->other_ctx) {
2485                                         already_added = true;
2486                                         break;
2487                                 }
2488                         }
2489
2490                         if (already_added)
2491                                 continue;
2492
2493                         tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2494                         if (!tmp) {
2495                                 dev_err(priv->ds->dev, "Failed to allocate memory\n");
2496                                 rc = -ENOMEM;
2497                                 goto out;
2498                         }
2499                         tmp->vid = v->vid;
2500                         tmp->port = c->port;
2501                         tmp->other_port = v->port;
2502                         tmp->other_ctx = c->other_ctx;
2503                         tmp->untagged = v->untagged;
2504                         list_add(&tmp->list, &crosschip_vlans);
2505                 }
2506         }
2507
2508         list_for_each_entry(tmp, &crosschip_vlans, list) {
2509                 struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
2510                 int upstream = dsa_upstream_port(priv->ds, tmp->port);
2511                 int match, subvlan;
2512                 u16 rx_vid;
2513
2514                 subvlan = sja1105_find_committed_subvlan(other_priv,
2515                                                          tmp->other_port,
2516                                                          tmp->vid);
2517                 /* If this happens, it's a bug. The neighbour switch does not
2518                  * have a subvlan for tmp->vid on tmp->other_port, but it
2519                  * should, since we already checked for its vlan_state.
2520                  */
2521                 if (WARN_ON(subvlan < 0)) {
2522                         rc = -EINVAL;
2523                         goto out;
2524                 }
2525
2526                 rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
2527                                                   tmp->other_port,
2528                                                   subvlan);
2529
2530                 /* The @rx_vid retagged from @tmp->vid on
2531                  * {@tmp->other_ds, @tmp->other_port} needs to be
2532                  * re-retagged to @tmp->vid on the way back to us.
2533                  *
2534                  * Assume the original @tmp->vid is already configured
2535                  * on this local switch, otherwise we wouldn't be
2536                  * retagging its subvlan on the other switch in the
2537                  * first place. We just need to add a reverse retagging
2538                  * rule for @rx_vid and install @rx_vid on our ports.
2539                  */
2540                 match = rx_vid;
2541                 new_vlan[match].vlanid = rx_vid;
2542                 new_vlan[match].vmemb_port |= BIT(tmp->port);
2543                 new_vlan[match].vmemb_port |= BIT(upstream);
2544                 /* The "untagged" flag is set the same as for the
2545                  * original VLAN. And towards the CPU, it doesn't
2546                  * really matter, because @rx_vid will only receive
2547                  * traffic on that port. For consistency with other dsa_8021q
2548                  * VLANs, we'll keep the CPU port tagged.
2549                  */
2550                 if (!tmp->untagged)
2551                         new_vlan[match].tag_port |= BIT(tmp->port);
2552                 new_vlan[match].tag_port |= BIT(upstream);
2553                 new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
2554                 /* Deny egress of @rx_vid towards our front-panel port.
2555                  * This will force the switch to drop it, and we'll see
2556                  * only the re-retagged packets (having the original,
2557                  * pre-initial-retagging, VLAN @tmp->vid).
2558                  */
2559                 new_vlan[match].vlan_bc &= ~BIT(tmp->port);
2560
2561                 /* On reverse retagging, the same ingress VLAN goes to multiple
2562                  * ports. So we have an opportunity to create composite rules
2563                  * to not waste the limited space in the retagging table.
2564                  */
2565                 k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
2566                                                  upstream, rx_vid, tmp->vid);
2567                 if (k < 0) {
2568                         if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
2569                                 dev_err(priv->ds->dev, "No more retagging rules\n");
2570                                 rc = -ENOSPC;
2571                                 goto out;
2572                         }
2573                         k = (*num_retagging)++;
2574                 }
2575                 /* And the retagging itself */
2576                 new_retagging[k].vlan_ing = rx_vid;
2577                 new_retagging[k].vlan_egr = tmp->vid;
2578                 new_retagging[k].ing_port = BIT(upstream);
2579                 new_retagging[k].egr_port |= BIT(tmp->port);
2580         }
2581
2582 out:
2583         list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
2584                 list_del(&tmp->list);
2585                 kfree(tmp);
2586         }
2587
2588         return rc;
2589 }
2590
2591 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
2592
2593 static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
2594 {
2595         struct sja1105_crosschip_switch *s, *pos;
2596         struct list_head crosschip_switches;
2597         struct dsa_8021q_crosschip_link *c;
2598         int rc = 0;
2599
2600         INIT_LIST_HEAD(&crosschip_switches);
2601
2602         list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
2603                 bool already_added = false;
2604
2605                 list_for_each_entry(s, &crosschip_switches, list) {
2606                         if (s->other_ctx == c->other_ctx) {
2607                                 already_added = true;
2608                                 break;
2609                         }
2610                 }
2611
2612                 if (already_added)
2613                         continue;
2614
2615                 s = kzalloc(sizeof(*s), GFP_KERNEL);
2616                 if (!s) {
2617                         dev_err(priv->ds->dev, "Failed to allocate memory\n");
2618                         rc = -ENOMEM;
2619                         goto out;
2620                 }
2621                 s->other_ctx = c->other_ctx;
2622                 list_add(&s->list, &crosschip_switches);
2623         }
2624
2625         list_for_each_entry(s, &crosschip_switches, list) {
2626                 struct sja1105_private *other_priv = s->other_ctx->ds->priv;
2627
2628                 rc = sja1105_build_vlan_table(other_priv, false);
2629                 if (rc)
2630                         goto out;
2631         }
2632
2633 out:
2634         list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
2635                 list_del(&s->list);
2636                 kfree(s);
2637         }
2638
2639         return rc;
2640 }
2641
2642 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
2643 {
2644         u16 subvlan_map[SJA1105_MAX_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
2645         struct sja1105_retagging_entry *new_retagging;
2646         struct sja1105_vlan_lookup_entry *new_vlan;
2647         struct sja1105_table *table;
2648         int i, num_retagging = 0;
2649         int rc;
2650
2651         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2652         new_vlan = kcalloc(VLAN_N_VID,
2653                            table->ops->unpacked_entry_size, GFP_KERNEL);
2654         if (!new_vlan)
2655                 return -ENOMEM;
2656
2657         table = &priv->static_config.tables[BLK_IDX_RETAGGING];
2658         new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
2659                                 table->ops->unpacked_entry_size, GFP_KERNEL);
2660         if (!new_retagging) {
2661                 kfree(new_vlan);
2662                 return -ENOMEM;
2663         }
2664
2665         for (i = 0; i < VLAN_N_VID; i++)
2666                 new_vlan[i].vlanid = VLAN_N_VID;
2667
2668         for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
2669                 new_retagging[i].vlan_ing = VLAN_N_VID;
2670
2671         for (i = 0; i < priv->ds->num_ports; i++)
2672                 sja1105_init_subvlan_map(subvlan_map[i]);
2673
2674         /* Bridge VLANs */
2675         rc = sja1105_build_bridge_vlans(priv, new_vlan);
2676         if (rc)
2677                 goto out;
2678
2679         /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
2680          * - RX VLANs
2681          * - TX VLANs
2682          * - Crosschip links
2683          */
2684         rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
2685         if (rc)
2686                 goto out;
2687
2688         /* Private VLANs necessary for dsa_8021q operation, which we need to
2689          * determine on our own:
2690          * - Sub-VLANs
2691          * - Sub-VLANs of crosschip switches
2692          */
2693         rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
2694                                     &num_retagging);
2695         if (rc)
2696                 goto out;
2697
2698         rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
2699                                               &num_retagging);
2700         if (rc)
2701                 goto out;
2702
2703         rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
2704         if (rc)
2705                 goto out;
2706
2707         rc = sja1105_commit_pvid(priv);
2708         if (rc)
2709                 goto out;
2710
2711         for (i = 0; i < priv->ds->num_ports; i++)
2712                 sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
2713
2714         if (notify) {
2715                 rc = sja1105_notify_crosschip_switches(priv);
2716                 if (rc)
2717                         goto out;
2718         }
2719
2720 out:
2721         kfree(new_vlan);
2722         kfree(new_retagging);
2723
2724         return rc;
2725 }
2726
2727 /* The TPID setting belongs to the General Parameters table,
2728  * which can only be partially reconfigured at runtime (and not the TPID).
2729  * So a switch reset is required.
2730  */
2731 int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
2732                            struct netlink_ext_ack *extack)
2733 {
2734         struct sja1105_l2_lookup_params_entry *l2_lookup_params;
2735         struct sja1105_general_params_entry *general_params;
2736         struct sja1105_private *priv = ds->priv;
2737         enum sja1105_vlan_state state;
2738         struct sja1105_table *table;
2739         struct sja1105_rule *rule;
2740         bool want_tagging;
2741         u16 tpid, tpid2;
2742         int rc;
2743
2744         list_for_each_entry(rule, &priv->flow_block.rules, list) {
2745                 if (rule->type == SJA1105_RULE_VL) {
2746                         NL_SET_ERR_MSG_MOD(extack,
2747                                            "Cannot change VLAN filtering with active VL rules");
2748                         return -EBUSY;
2749                 }
2750         }
2751
2752         if (enabled) {
2753                 /* Enable VLAN filtering. */
2754                 tpid  = ETH_P_8021Q;
2755                 tpid2 = ETH_P_8021AD;
2756         } else {
2757                 /* Disable VLAN filtering. */
2758                 tpid  = ETH_P_SJA1105;
2759                 tpid2 = ETH_P_SJA1105;
2760         }
2761
2762         for (port = 0; port < ds->num_ports; port++) {
2763                 struct sja1105_port *sp = &priv->ports[port];
2764
2765                 if (enabled)
2766                         sp->xmit_tpid = priv->info->qinq_tpid;
2767                 else
2768                         sp->xmit_tpid = ETH_P_SJA1105;
2769         }
2770
2771         if (!enabled)
2772                 state = SJA1105_VLAN_UNAWARE;
2773         else if (priv->best_effort_vlan_filtering)
2774                 state = SJA1105_VLAN_BEST_EFFORT;
2775         else
2776                 state = SJA1105_VLAN_FILTERING_FULL;
2777
2778         if (priv->vlan_state == state)
2779                 return 0;
2780
2781         priv->vlan_state = state;
2782         want_tagging = (state == SJA1105_VLAN_UNAWARE ||
2783                         state == SJA1105_VLAN_BEST_EFFORT);
2784
2785         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
2786         general_params = table->entries;
2787         /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
2788         general_params->tpid = tpid;
2789         /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
2790         general_params->tpid2 = tpid2;
2791         /* When VLAN filtering is on, we need to at least be able to
2792          * decode management traffic through the "backup plan".
2793          */
2794         general_params->incl_srcpt1 = enabled;
2795         general_params->incl_srcpt0 = enabled;
2796
2797         want_tagging = priv->best_effort_vlan_filtering || !enabled;
2798
2799         /* VLAN filtering => independent VLAN learning.
2800          * No VLAN filtering (or best effort) => shared VLAN learning.
2801          *
2802          * In shared VLAN learning mode, untagged traffic still gets
2803          * pvid-tagged, and the FDB table gets populated with entries
2804          * containing the "real" (pvid or from VLAN tag) VLAN ID.
2805          * However the switch performs a masked L2 lookup in the FDB,
2806          * effectively only looking up a frame's DMAC (and not VID) for the
2807          * forwarding decision.
2808          *
2809          * This is extremely convenient for us, because in modes with
2810          * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
2811          * each front panel port. This is good for identification but breaks
2812          * learning badly - the VID of the learnt FDB entry is unique, aka
2813          * no frames coming from any other port are going to have it. So
2814          * for forwarding purposes, this is as though learning was broken
2815          * (all frames get flooded).
2816          */
2817         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
2818         l2_lookup_params = table->entries;
2819         l2_lookup_params->shared_learn = want_tagging;
2820
2821         sja1105_frame_memory_partitioning(priv);
2822
2823         rc = sja1105_build_vlan_table(priv, false);
2824         if (rc)
2825                 return rc;
2826
2827         rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
2828         if (rc)
2829                 NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");
2830
2831         /* Switch port identification based on 802.1Q is only possible
2832          * if we are not under a vlan_filtering bridge. So make sure
2833          * the two configurations are mutually exclusive (of course, the
2834          * user may know better, i.e. best_effort_vlan_filtering).
2835          */
2836         return sja1105_setup_8021q_tagging(ds, want_tagging);
2837 }
2838
2839 /* Returns number of VLANs added (0 or 1) on success,
2840  * or a negative error code.
2841  */
2842 static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
2843                                 u16 flags, struct list_head *vlan_list)
2844 {
2845         bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
2846         bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
2847         struct sja1105_bridge_vlan *v;
2848
2849         list_for_each_entry(v, vlan_list, list) {
2850                 if (v->port == port && v->vid == vid) {
2851                         /* Already added */
2852                         if (v->untagged == untagged && v->pvid == pvid)
2853                                 /* Nothing changed */
2854                                 return 0;
2855
2856                         /* It's the same VLAN, but some of the flags changed
2857                          * and the user did not bother to delete it first.
2858                          * Update it and trigger sja1105_build_vlan_table.
2859                          */
2860                         v->untagged = untagged;
2861                         v->pvid = pvid;
2862                         return 1;
2863                 }
2864         }
2865
2866         v = kzalloc(sizeof(*v), GFP_KERNEL);
2867         if (!v) {
2868                 dev_err(ds->dev, "Out of memory while storing VLAN\n");
2869                 return -ENOMEM;
2870         }
2871
2872         v->port = port;
2873         v->vid = vid;
2874         v->untagged = untagged;
2875         v->pvid = pvid;
2876         list_add(&v->list, vlan_list);
2877
2878         return 1;
2879 }
2880
2881 /* Returns number of VLANs deleted (0 or 1) */
2882 static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
2883                                 struct list_head *vlan_list)
2884 {
2885         struct sja1105_bridge_vlan *v, *n;
2886
2887         list_for_each_entry_safe(v, n, vlan_list, list) {
2888                 if (v->port == port && v->vid == vid) {
2889                         list_del(&v->list);
2890                         kfree(v);
2891                         return 1;
2892                 }
2893         }
2894
2895         return 0;
2896 }
2897
2898 static int sja1105_vlan_add(struct dsa_switch *ds, int port,
2899                             const struct switchdev_obj_port_vlan *vlan,
2900                             struct netlink_ext_ack *extack)
2901 {
2902         struct sja1105_private *priv = ds->priv;
2903         bool vlan_table_changed = false;
2904         int rc;
2905
2906         /* If the user wants best-effort VLAN filtering (aka vlan_filtering
2907          * bridge plus tagging), be sure to at least deny alterations to the
2908          * configuration done by dsa_8021q.
2909          */
2910         if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
2911             vid_is_dsa_8021q(vlan->vid)) {
2912                 NL_SET_ERR_MSG_MOD(extack,
2913                                    "Range 1024-3071 reserved for dsa_8021q operation");
2914                 return -EBUSY;
2915         }
2916
2917         rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
2918                                   &priv->bridge_vlans);
2919         if (rc < 0)
2920                 return rc;
2921         if (rc > 0)
2922                 vlan_table_changed = true;
2923
2924         if (!vlan_table_changed)
2925                 return 0;
2926
2927         return sja1105_build_vlan_table(priv, true);
2928 }
2929
2930 static int sja1105_vlan_del(struct dsa_switch *ds, int port,
2931                             const struct switchdev_obj_port_vlan *vlan)
2932 {
2933         struct sja1105_private *priv = ds->priv;
2934         bool vlan_table_changed = false;
2935         int rc;
2936
2937         rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
2938         if (rc > 0)
2939                 vlan_table_changed = true;
2940
2941         if (!vlan_table_changed)
2942                 return 0;
2943
2944         return sja1105_build_vlan_table(priv, true);
2945 }
2946
2947 static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
2948                                       u16 flags)
2949 {
2950         struct sja1105_private *priv = ds->priv;
2951         int rc;
2952
2953         rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
2954         if (rc <= 0)
2955                 return rc;
2956
2957         return sja1105_build_vlan_table(priv, true);
2958 }
2959
2960 static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
2961 {
2962         struct sja1105_private *priv = ds->priv;
2963         int rc;
2964
2965         rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
2966         if (!rc)
2967                 return 0;
2968
2969         return sja1105_build_vlan_table(priv, true);
2970 }
2971
2972 static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
2973         .vlan_add       = sja1105_dsa_8021q_vlan_add,
2974         .vlan_del       = sja1105_dsa_8021q_vlan_del,
2975 };
2976
2977 /* The programming model for the SJA1105 switch is "all-at-once" via static
2978  * configuration tables. Some of these can be dynamically modified at runtime,
2979  * but not the xMII mode parameters table.
2980  * Furthermore, some PHYs may not have crystals for generating their clocks
2981  * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
2982  * ref_clk pin. So port clocking needs to be initialized early, before
2983  * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
2984  * Setting correct PHY link speed does not matter now.
2985  * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
2986  * bindings are not yet parsed by DSA core. We need to parse early so that we
2987  * can populate the xMII mode parameters table.
2988  */
2989 static int sja1105_setup(struct dsa_switch *ds)
2990 {
2991         struct sja1105_private *priv = ds->priv;
2992         int rc;
2993
2994         rc = sja1105_parse_dt(priv);
2995         if (rc < 0) {
2996                 dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
2997                 return rc;
2998         }
2999
3000         /* Error out early if internal delays are required through DT
3001          * and we can't apply them.
3002          */
3003         rc = sja1105_parse_rgmii_delays(priv);
3004         if (rc < 0) {
3005                 dev_err(ds->dev, "RGMII delay not supported\n");
3006                 return rc;
3007         }
3008
3009         rc = sja1105_ptp_clock_register(ds);
3010         if (rc < 0) {
3011                 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
3012                 return rc;
3013         }
3014
3015         rc = sja1105_mdiobus_register(ds);
3016         if (rc < 0) {
3017                 dev_err(ds->dev, "Failed to register MDIO bus: %pe\n",
3018                         ERR_PTR(rc));
3019                 goto out_ptp_clock_unregister;
3020         }
3021
3022         /* Create and send configuration down to device */
3023         rc = sja1105_static_config_load(priv);
3024         if (rc < 0) {
3025                 dev_err(ds->dev, "Failed to load static config: %d\n", rc);
3026                 goto out_mdiobus_unregister;
3027         }
3028         /* Configure the CGU (PHY link modes and speeds) */
3029         rc = priv->info->clocking_setup(priv);
3030         if (rc < 0) {
3031                 dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
3032                 goto out_static_config_free;
3033         }
3034         /* On SJA1105, VLAN filtering per se is always enabled in hardware.
3035          * The only thing we can do to disable it is lie about what the 802.1Q
3036          * EtherType is.
3037          * So it will still try to apply VLAN filtering, but all ingress
3038          * traffic (except frames received with EtherType of ETH_P_SJA1105)
3039          * will be internally tagged with a distorted VLAN header where the
3040          * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
3041          */
3042         ds->vlan_filtering_is_global = true;
3043
3044         /* Advertise the 8 egress queues */
3045         ds->num_tx_queues = SJA1105_NUM_TC;
3046
3047         ds->mtu_enforcement_ingress = true;
3048
3049         priv->best_effort_vlan_filtering = true;
3050
3051         rc = sja1105_devlink_setup(ds);
3052         if (rc < 0)
3053                 goto out_static_config_free;
3054
3055         /* The DSA/switchdev model brings up switch ports in standalone mode by
3056          * default, and that means vlan_filtering is 0 since they're not under
3057          * a bridge, so it's safe to set up switch tagging at this time.
3058          */
3059         rtnl_lock();
3060         rc = sja1105_setup_8021q_tagging(ds, true);
3061         rtnl_unlock();
3062         if (rc)
3063                 goto out_devlink_teardown;
3064
3065         return 0;
3066
3067 out_devlink_teardown:
3068         sja1105_devlink_teardown(ds);
3069 out_mdiobus_unregister:
3070         sja1105_mdiobus_unregister(ds);
3071 out_ptp_clock_unregister:
3072         sja1105_ptp_clock_unregister(ds);
3073 out_static_config_free:
3074         sja1105_static_config_free(&priv->static_config);
3075
3076         return rc;
3077 }
3078
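/* Undo the effects of sja1105_setup: stop the deferred xmit workers, tear
 * down devlink, tc-flower and TAS state, unregister the MDIO buses and the
 * PTP clock, and free the static config along with the cached VLAN lists.
 */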
3079 static void sja1105_teardown(struct dsa_switch *ds)
3080 {
3081         struct sja1105_private *priv = ds->priv;
3082         struct sja1105_bridge_vlan *v, *n;
3083         int port;
3084
3085         for (port = 0; port < ds->num_ports; port++) {
3086                 struct sja1105_port *sp = &priv->ports[port];
3087
3088                 if (!dsa_is_user_port(ds, port))
3089                         continue;
3090
3091                 if (sp->xmit_worker)
3092                         kthread_destroy_worker(sp->xmit_worker);
3093         }
3094
3095         sja1105_devlink_teardown(ds);
        sja1105_mdiobus_unregister(ds);
3096         sja1105_flower_teardown(ds);
3097         sja1105_tas_teardown(ds);
3098         sja1105_ptp_clock_unregister(ds);
3099         sja1105_static_config_free(&priv->static_config);
3100
3101         list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
3102                 list_del(&v->list);
3103                 kfree(v);
3104         }
3105
3106         list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
3107                 list_del(&v->list);
3108                 kfree(v);
3109         }
3110 }
3111
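/* Quiesce the deferred xmit path of a user port: cancel any work in flight
 * and drop the frames still waiting in its queue.
 */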
3112 static void sja1105_port_disable(struct dsa_switch *ds, int port)
3113 {
3114         struct sja1105_private *priv = ds->priv;
3115         struct sja1105_port *sp = &priv->ports[port];
3116
3117         if (!dsa_is_user_port(ds, port))
3118                 return;
3119
3120         kthread_cancel_work_sync(&sp->xmit_work);
3121         skb_queue_purge(&sp->xmit_queue);
3122 }
3123
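/* Install a one-shot management route matching the frame's destination MAC,
 * hand the frame to the CPU port for transmission, then poll until the
 * switch clears ENFPORT (i.e. the route was consumed) or we time out.
 */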
3124 static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
3125                              struct sk_buff *skb, bool takets)
3126 {
3127         struct sja1105_mgmt_entry mgmt_route = {0};
3128         struct sja1105_private *priv = ds->priv;
3129         struct ethhdr *hdr;
3130         int timeout = 10;
3131         int rc;
3132
3133         hdr = eth_hdr(skb);
3134
3135         mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
3136         mgmt_route.destports = BIT(port);
3137         mgmt_route.enfport = 1;
3138         mgmt_route.tsreg = 0;
3139         mgmt_route.takets = takets;
3140
3141         rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
3142                                           slot, &mgmt_route, true);
3143         if (rc < 0) {
3144                 kfree_skb(skb);
3145                 return rc;
3146         }
3147
3148         /* Transfer skb to the host port. */
3149         dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
3150
3151         /* Wait until the switch has processed the frame */
3152         do {
3153                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
3154                                                  slot, &mgmt_route);
3155                 if (rc < 0) {
3156                         dev_err_ratelimited(priv->ds->dev,
3157                                             "failed to poll for mgmt route\n");
3158                         continue;
3159                 }
3160
3161                 /* UM10944: The ENFPORT flag of the respective entry is
3162                  * cleared when a match is found. The host can use this
3163                  * flag as an acknowledgment.
3164                  */
3165                 cpu_relax();
3166         } while (mgmt_route.enfport && --timeout);
3167
3168         if (!timeout) {
3169                 /* Clean up the management route so that a follow-up
3170                  * frame may not match on it by mistake.
3171                  * This is only supported in hardware on P/Q/R/S - on E/T it is
3172                  * a no-op and we are silently discarding the -EOPNOTSUPP.
3173                  */
3174                 sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
3175                                              slot, &mgmt_route, false);
3176                 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
3177         }
3178
3179         return NETDEV_TX_OK;
3180 }
3181
3182 #define work_to_port(work) \
3183                 container_of((work), struct sja1105_port, xmit_work)
3184 #define tagger_to_sja1105(t) \
3185                 container_of((t), struct sja1105_private, tagger_data)
3186
3187 /* Deferred work is unfortunately necessary because setting up the management
3188  * route cannot be done from atomic context (SPI transfer takes a sleepable
3189  * lock on the bus)
3190  */
3191 static void sja1105_port_deferred_xmit(struct kthread_work *work)
3192 {
3193         struct sja1105_port *sp = work_to_port(work);
3194         struct sja1105_tagger_data *tagger_data = sp->data;
3195         struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
3196         int port = sp - priv->ports;
3197         struct sk_buff *skb;
3198
3199         while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
3200                 struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
3201
3202                 mutex_lock(&priv->mgmt_lock);
3203
3204                 sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
3205
3206                 /* The clone, if there, was made by dsa_skb_tx_timestamp */
3207                 if (clone)
3208                         sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
3209
3210                 mutex_unlock(&priv->mgmt_lock);
3211         }
3212 }
3213
3214 /* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
3215  * which cannot be reconfigured at runtime. So a switch reset is required.
3216  */
3217 static int sja1105_set_ageing_time(struct dsa_switch *ds,
3218                                    unsigned int ageing_time)
3219 {
3220         struct sja1105_l2_lookup_params_entry *l2_lookup_params;
3221         struct sja1105_private *priv = ds->priv;
3222         struct sja1105_table *table;
3223         unsigned int maxage;
3224
3225         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
3226         l2_lookup_params = table->entries;
3227
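        /* DSA passes ageing_time in milliseconds; SJA1105_AGEING_TIME_MS
         * converts it to the units expected by the MAXAGE field.
         */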
3228         maxage = SJA1105_AGEING_TIME_MS(ageing_time);
3229
3230         if (l2_lookup_params->maxage == maxage)
3231                 return 0;
3232
3233         l2_lookup_params->maxage = maxage;
3234
3235         return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
3236 }
3237
3238 static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
3239 {
3240         struct sja1105_l2_policing_entry *policing;
3241         struct sja1105_private *priv = ds->priv;
3242
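        /* MAXLEN is compared against the length of the frame on the wire, so
         * budget for the L2 header, a VLAN tag and the FCS on top of the MTU.
         */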
3243         new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
3244
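        /* Frames on the CPU port carry an extra VLAN tag inserted by the
         * tagging protocol, so make room for that as well.
         */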
3245         if (dsa_is_cpu_port(ds, port))
3246                 new_mtu += VLAN_HLEN;
3247
3248         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3249
3250         if (policing[port].maxlen == new_mtu)
3251                 return 0;
3252
3253         policing[port].maxlen = new_mtu;
3254
3255         return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3256 }
3257
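/* The driver treats 2043 octets as the largest frame it will accept on the
 * wire (see the L2 policing MAXLEN programming in sja1105_change_mtu);
 * report the corresponding L2 payload size as the maximum MTU.
 */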
3258 static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
3259 {
3260         return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
3261 }
3262
3263 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
3264                                  enum tc_setup_type type,
3265                                  void *type_data)
3266 {
3267         switch (type) {
3268         case TC_SETUP_QDISC_TAPRIO:
3269                 return sja1105_setup_tc_taprio(ds, port, type_data);
3270         case TC_SETUP_QDISC_CBS:
3271                 return sja1105_setup_tc_cbs(ds, port, type_data);
3272         default:
3273                 return -EOPNOTSUPP;
3274         }
3275 }
3276
3277 /* We have a single mirror (@to) port, but can configure ingress and egress
3278  * mirroring on all other (@from) ports.
3279  * We need to allow mirroring rules only as long as the @to port is always the
3280  * same, and we need to unset the @to port from mirr_port only when there is no
3281  * mirroring rule that references it.
3282  */
3283 static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
3284                                 bool ingress, bool enabled)
3285 {
3286         struct sja1105_general_params_entry *general_params;
3287         struct sja1105_mac_config_entry *mac;
3288         struct dsa_switch *ds = priv->ds;
3289         struct sja1105_table *table;
3290         bool already_enabled;
3291         u64 new_mirr_port;
3292         int rc;
3293
3294         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
3295         general_params = table->entries;
3296
3297         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
3298
3299         already_enabled = (general_params->mirr_port != ds->num_ports);
3300         if (already_enabled && enabled && general_params->mirr_port != to) {
3301                 dev_err(priv->ds->dev,
3302                         "Delete mirroring rules towards port %llu first\n",
3303                         general_params->mirr_port);
3304                 return -EBUSY;
3305         }
3306
3307         new_mirr_port = to;
3308         if (!enabled) {
3309                 bool keep = false;
3310                 int port;
3311
3312                 /* Anybody still referencing mirr_port? */
3313                 for (port = 0; port < ds->num_ports; port++) {
3314                         if (mac[port].ing_mirr || mac[port].egr_mirr) {
3315                                 keep = true;
3316                                 break;
3317                         }
3318                 }
3319                 /* Unset already_enabled for next time */
3320                 if (!keep)
3321                         new_mirr_port = ds->num_ports;
3322         }
3323         if (new_mirr_port != general_params->mirr_port) {
3324                 general_params->mirr_port = new_mirr_port;
3325
3326                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
3327                                                   0, general_params, true);
3328                 if (rc < 0)
3329                         return rc;
3330         }
3331
3332         if (ingress)
3333                 mac[from].ing_mirr = enabled;
3334         else
3335                 mac[from].egr_mirr = enabled;
3336
3337         return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
3338                                             &mac[from], true);
3339 }
3340
3341 static int sja1105_mirror_add(struct dsa_switch *ds, int port,
3342                               struct dsa_mall_mirror_tc_entry *mirror,
3343                               bool ingress)
3344 {
3345         return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3346                                     ingress, true);
3347 }
3348
3349 static void sja1105_mirror_del(struct dsa_switch *ds, int port,
3350                                struct dsa_mall_mirror_tc_entry *mirror)
3351 {
3352         sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3353                              mirror->ingress, false);
3354 }
3355
3356 static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
3357                                     struct dsa_mall_policer_tc_entry *policer)
3358 {
3359         struct sja1105_l2_policing_entry *policing;
3360         struct sja1105_private *priv = ds->priv;
3361
3362         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3363
3364         /* In hardware, every 8 microseconds the credit level is incremented by
3365          * the value of RATE bytes divided by 64, up to a maximum of SMAX
3366          * bytes.
3367          */
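        /* With an 8 us update interval, a rate of R bytes/sec therefore
         * requires RATE = R * 8e-6 * 64 = R * 512 / 10^6, which is what is
         * programmed below.
         */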
3368         policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
3369                                       1000000);
3370         policing[port].smax = policer->burst;
3371
3372         return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3373 }
3374
3375 static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
3376 {
3377         struct sja1105_l2_policing_entry *policing;
3378         struct sja1105_private *priv = ds->priv;
3379
3380         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3381
3382         policing[port].rate = SJA1105_RATE_MBPS(1000);
3383         policing[port].smax = 65535;
3384
3385         sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3386 }
3387
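/* Enable or disable address learning on a port, both in the MAC Configuration
 * table and in the cached learn_ena mask.
 */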
3388 static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
3389                                      bool enabled)
3390 {
3391         struct sja1105_mac_config_entry *mac;
3392         int rc;
3393
3394         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
3395
3396         mac[port].dyn_learn = enabled;
3397
3398         rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
3399                                           &mac[port], true);
3400         if (rc)
3401                 return rc;
3402
3403         if (enabled)
3404                 priv->learn_ena |= BIT(port);
3405         else
3406                 priv->learn_ena &= ~BIT(port);
3407
3408         return 0;
3409 }
3410
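/* Update the cached unknown-unicast and broadcast flooding masks for port
 * @to, then let sja1105_manage_flood_domains() push them to hardware.
 */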
3411 static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
3412                                           struct switchdev_brport_flags flags)
3413 {
3414         if (flags.mask & BR_FLOOD) {
3415                 if (flags.val & BR_FLOOD)
3416                         priv->ucast_egress_floods |= BIT(to);
3417                 else
3418                         priv->ucast_egress_floods &= ~BIT(to);
3419         }
3420
3421         if (flags.mask & BR_BCAST_FLOOD) {
3422                 if (flags.val & BR_BCAST_FLOOD)
3423                         priv->bcast_egress_floods |= BIT(to);
3424                 else
3425                         priv->bcast_egress_floods &= ~BIT(to);
3426         }
3427
3428         return sja1105_manage_flood_domains(priv);
3429 }
3430
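/* Unknown multicast flooding is controlled through the L2 Lookup entry that
 * matches SJA1105_UNKNOWN_MULTICAST: add or remove @to from its destination
 * port mask and write the entry back through the dynamic interface.
 */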
3431 static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
3432                                     struct switchdev_brport_flags flags,
3433                                     struct netlink_ext_ack *extack)
3434 {
3435         struct sja1105_l2_lookup_entry *l2_lookup;
3436         struct sja1105_table *table;
3437         int match;
3438
3439         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
3440         l2_lookup = table->entries;
3441
3442         for (match = 0; match < table->entry_count; match++)
3443                 if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
3444                     l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
3445                         break;
3446
3447         if (match == table->entry_count) {
3448                 NL_SET_ERR_MSG_MOD(extack,
3449                                    "Could not find FDB entry for unknown multicast");
3450                 return -ENOSPC;
3451         }
3452
3453         if (flags.val & BR_MCAST_FLOOD)
3454                 l2_lookup[match].destports |= BIT(to);
3455         else
3456                 l2_lookup[match].destports &= ~BIT(to);
3457
3458         return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
3459                                             l2_lookup[match].index,
3460                                             &l2_lookup[match],
3461                                             true);
3462 }
3463
3464 static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
3465                                          struct switchdev_brport_flags flags,
3466                                          struct netlink_ext_ack *extack)
3467 {
3468         struct sja1105_private *priv = ds->priv;
3469
3470         if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
3471                            BR_BCAST_FLOOD))
3472                 return -EINVAL;
3473
3474         if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
3475             !priv->info->can_limit_mcast_flood) {
3476                 bool multicast = !!(flags.val & BR_MCAST_FLOOD);
3477                 bool unicast = !!(flags.val & BR_FLOOD);
3478
3479                 if (unicast != multicast) {
3480                         NL_SET_ERR_MSG_MOD(extack,
3481                                            "This chip cannot configure multicast flooding independently of unicast");
3482                         return -EINVAL;
3483                 }
3484         }
3485
3486         return 0;
3487 }
3488
3489 static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
3490                                      struct switchdev_brport_flags flags,
3491                                      struct netlink_ext_ack *extack)
3492 {
3493         struct sja1105_private *priv = ds->priv;
3494         int rc;
3495
3496         if (flags.mask & BR_LEARNING) {
3497                 bool learn_ena = !!(flags.val & BR_LEARNING);
3498
3499                 rc = sja1105_port_set_learning(priv, port, learn_ena);
3500                 if (rc)
3501                         return rc;
3502         }
3503
3504         if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
3505                 rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
3506                 if (rc)
3507                         return rc;
3508         }
3509
3510         /* For chips that can't offload BR_MCAST_FLOOD independently, there
3511          * is nothing to do here, we ensured the configuration is in sync by
3512          * offloading BR_FLOOD.
3513          */
3514         if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
3515                 rc = sja1105_port_mcast_flood(priv, port, flags,
3516                                               extack);
3517                 if (rc)
3518                         return rc;
3519         }
3520
3521         return 0;
3522 }
3523
3524 static const struct dsa_switch_ops sja1105_switch_ops = {
3525         .get_tag_protocol       = sja1105_get_tag_protocol,
3526         .setup                  = sja1105_setup,
3527         .teardown               = sja1105_teardown,
3528         .set_ageing_time        = sja1105_set_ageing_time,
3529         .port_change_mtu        = sja1105_change_mtu,
3530         .port_max_mtu           = sja1105_get_max_mtu,
3531         .phylink_validate       = sja1105_phylink_validate,
3532         .phylink_mac_config     = sja1105_mac_config,
3533         .phylink_mac_link_up    = sja1105_mac_link_up,
3534         .phylink_mac_link_down  = sja1105_mac_link_down,
3535         .get_strings            = sja1105_get_strings,
3536         .get_ethtool_stats      = sja1105_get_ethtool_stats,
3537         .get_sset_count         = sja1105_get_sset_count,
3538         .get_ts_info            = sja1105_get_ts_info,
3539         .port_disable           = sja1105_port_disable,
3540         .port_fdb_dump          = sja1105_fdb_dump,
3541         .port_fdb_add           = sja1105_fdb_add,
3542         .port_fdb_del           = sja1105_fdb_del,
3543         .port_bridge_join       = sja1105_bridge_join,
3544         .port_bridge_leave      = sja1105_bridge_leave,
3545         .port_pre_bridge_flags  = sja1105_port_pre_bridge_flags,
3546         .port_bridge_flags      = sja1105_port_bridge_flags,
3547         .port_stp_state_set     = sja1105_bridge_stp_state_set,
3548         .port_vlan_filtering    = sja1105_vlan_filtering,
3549         .port_vlan_add          = sja1105_vlan_add,
3550         .port_vlan_del          = sja1105_vlan_del,
3551         .port_mdb_add           = sja1105_mdb_add,
3552         .port_mdb_del           = sja1105_mdb_del,
3553         .port_hwtstamp_get      = sja1105_hwtstamp_get,
3554         .port_hwtstamp_set      = sja1105_hwtstamp_set,
3555         .port_rxtstamp          = sja1105_port_rxtstamp,
3556         .port_txtstamp          = sja1105_port_txtstamp,
3557         .port_setup_tc          = sja1105_port_setup_tc,
3558         .port_mirror_add        = sja1105_mirror_add,
3559         .port_mirror_del        = sja1105_mirror_del,
3560         .port_policer_add       = sja1105_port_policer_add,
3561         .port_policer_del       = sja1105_port_policer_del,
3562         .cls_flower_add         = sja1105_cls_flower_add,
3563         .cls_flower_del         = sja1105_cls_flower_del,
3564         .cls_flower_stats       = sja1105_cls_flower_stats,
3565         .crosschip_bridge_join  = sja1105_crosschip_bridge_join,
3566         .crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
3567         .devlink_param_get      = sja1105_devlink_param_get,
3568         .devlink_param_set      = sja1105_devlink_param_set,
3569         .devlink_info_get       = sja1105_devlink_info_get,
3570 };
3571
3572 static const struct of_device_id sja1105_dt_ids[];
3573
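/* Read the device ID and part number over SPI and match them against the
 * chips we know about. If the device tree names a different chip than the
 * one actually found, warn and adopt the detected one's description.
 */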
3574 static int sja1105_check_device_id(struct sja1105_private *priv)
3575 {
3576         const struct sja1105_regs *regs = priv->info->regs;
3577         u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
3578         struct device *dev = &priv->spidev->dev;
3579         const struct of_device_id *match;
3580         u32 device_id;
3581         u64 part_no;
3582         int rc;
3583
3584         rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
3585                               NULL);
3586         if (rc < 0)
3587                 return rc;
3588
3589         rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
3590                               SJA1105_SIZE_DEVICE_ID);
3591         if (rc < 0)
3592                 return rc;
3593
3594         sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
3595
3596         for (match = sja1105_dt_ids; match->compatible[0]; match++) {
3597                 const struct sja1105_info *info = match->data;
3598
3599                 /* Is what's been probed in our match table at all? */
3600                 if (info->device_id != device_id || info->part_no != part_no)
3601                         continue;
3602
3603                 /* But is it what's in the device tree? */
3604                 if (priv->info->device_id != device_id ||
3605                     priv->info->part_no != part_no) {
3606                         dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
3607                                  priv->info->name, info->name);
3608                         /* It isn't. No problem, pick that up. */
3609                         priv->info = info;
3610                 }
3611
3612                 return 0;
3613         }
3614
3615         dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
3616                 device_id, part_no);
3617
3618         return -ENODEV;
3619 }
3620
3621 static int sja1105_probe(struct spi_device *spi)
3622 {
3623         struct sja1105_tagger_data *tagger_data;
3624         struct device *dev = &spi->dev;
3625         struct sja1105_private *priv;
3626         size_t max_xfer, max_msg;
3627         struct dsa_switch *ds;
3628         int rc, port;
3629
3630         if (!dev->of_node) {
3631                 dev_err(dev, "No DTS bindings for SJA1105 driver\n");
3632                 return -EINVAL;
3633         }
3634
3635         priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
3636         if (!priv)
3637                 return -ENOMEM;
3638
3639         /* Configure the optional reset pin and bring up switch */
3640         priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
3641         if (IS_ERR(priv->reset_gpio))
3642                 dev_dbg(dev, "reset-gpios not defined, ignoring\n");
3643         else
3644                 sja1105_hw_reset(priv->reset_gpio, 1, 1);
3645
3646         /* Populate our driver private structure (priv) based on
3647          * the device tree node that was probed (spi)
3648          */
3649         priv->spidev = spi;
3650         spi_set_drvdata(spi, priv);
3651
3652         /* Configure the SPI bus */
3653         spi->bits_per_word = 8;
3654         rc = spi_setup(spi);
3655         if (rc < 0) {
3656                 dev_err(dev, "Could not init SPI\n");
3657                 return rc;
3658         }
3659
3660         /* In sja1105_xfer, we send spi_messages composed of two spi_transfers:
3661          * a small one for the message header and another one for the current
3662          * chunk of the packed buffer.
3663          * Check that the restrictions imposed by the SPI controller are
3664          * respected: the chunk buffer is smaller than the max transfer size,
3665          * and the total length of the chunk plus its message header is smaller
3666          * than the max message size.
3667          * We do that during probe time since the maximum transfer size is a
3668          * runtime invariant.
3669          */
3670         max_xfer = spi_max_transfer_size(spi);
3671         max_msg = spi_max_message_size(spi);
3672
3673         /* We need to send at least one 64-bit word of SPI payload per message
3674          * in order to be able to make useful progress.
3675          */
3676         if (max_msg < SJA1105_SIZE_SPI_MSG_HEADER + 8) {
3677                 dev_err(dev, "SPI master cannot send large enough buffers, aborting\n");
3678                 return -EINVAL;
3679         }
3680
3681         priv->max_xfer_len = SJA1105_SIZE_SPI_MSG_MAXLEN;
3682         if (priv->max_xfer_len > max_xfer)
3683                 priv->max_xfer_len = max_xfer;
3684         if (priv->max_xfer_len > max_msg - SJA1105_SIZE_SPI_MSG_HEADER)
3685                 priv->max_xfer_len = max_msg - SJA1105_SIZE_SPI_MSG_HEADER;
3686
3687         priv->info = of_device_get_match_data(dev);
3688
3689         /* Detect hardware device */
3690         rc = sja1105_check_device_id(priv);
3691         if (rc < 0) {
3692                 dev_err(dev, "Device ID check failed: %d\n", rc);
3693                 return rc;
3694         }
3695
3696         dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
3697
3698         ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
3699         if (!ds)
3700                 return -ENOMEM;
3701
3702         ds->dev = dev;
3703         ds->num_ports = priv->info->num_ports;
3704         ds->ops = &sja1105_switch_ops;
3705         ds->priv = priv;
3706         priv->ds = ds;
3707
3708         tagger_data = &priv->tagger_data;
3709
3710         mutex_init(&priv->ptp_data.lock);
3711         mutex_init(&priv->mgmt_lock);
3712
3713         priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
3714                                            GFP_KERNEL);
3715         if (!priv->dsa_8021q_ctx)
3716                 return -ENOMEM;
3717
3718         priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
3719         priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
3720         priv->dsa_8021q_ctx->ds = ds;
3721
3722         INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
3723         INIT_LIST_HEAD(&priv->bridge_vlans);
3724         INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
3725
3726         sja1105_tas_setup(ds);
3727         sja1105_flower_setup(ds);
3728
3729         rc = dsa_register_switch(priv->ds);
3730         if (rc)
3731                 return rc;
3732
3733         if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
3734                 priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
3735                                          sizeof(struct sja1105_cbs_entry),
3736                                          GFP_KERNEL);
3737                 if (!priv->cbs) {
3738                         rc = -ENOMEM;
3739                         goto out_unregister_switch;
3740                 }
3741         }
3742
3743         /* Connections between dsa_port and sja1105_port */
3744         for (port = 0; port < ds->num_ports; port++) {
3745                 struct sja1105_port *sp = &priv->ports[port];
3746                 struct dsa_port *dp = dsa_to_port(ds, port);
3747                 struct net_device *slave;
3748                 int subvlan;
3749
3750                 if (!dsa_is_user_port(ds, port))
3751                         continue;
3752
3753                 dp->priv = sp;
3754                 sp->dp = dp;
3755                 sp->data = tagger_data;
3756                 slave = dp->slave;
3757                 kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
3758                 sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
3759                                                         slave->name);
3760                 if (IS_ERR(sp->xmit_worker)) {
3761                         rc = PTR_ERR(sp->xmit_worker);
3762                         dev_err(ds->dev,
3763                                 "failed to create deferred xmit thread: %d\n",
3764                                 rc);
3765                         goto out_destroy_workers;
3766                 }
3767                 skb_queue_head_init(&sp->xmit_queue);
3768                 sp->xmit_tpid = ETH_P_SJA1105;
3769
3770                 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
3771                         sp->subvlan_map[subvlan] = VLAN_N_VID;
3772         }
3773
3774         return 0;
3775
3776 out_destroy_workers:
3777         while (port-- > 0) {
3778                 struct sja1105_port *sp = &priv->ports[port];
3779
3780                 if (!dsa_is_user_port(ds, port))
3781                         continue;
3782
3783                 kthread_destroy_worker(sp->xmit_worker);
3784         }
3785
3786 out_unregister_switch:
3787         dsa_unregister_switch(ds);
3788
3789         return rc;
3790 }
3791
3792 static int sja1105_remove(struct spi_device *spi)
3793 {
3794         struct sja1105_private *priv = spi_get_drvdata(spi);
3795
3796         dsa_unregister_switch(priv->ds);
3797         return 0;
3798 }
3799
3800 static const struct of_device_id sja1105_dt_ids[] = {
3801         { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
3802         { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
3803         { .compatible = "nxp,sja1105p", .data = &sja1105p_info },
3804         { .compatible = "nxp,sja1105q", .data = &sja1105q_info },
3805         { .compatible = "nxp,sja1105r", .data = &sja1105r_info },
3806         { .compatible = "nxp,sja1105s", .data = &sja1105s_info },
3807         { .compatible = "nxp,sja1110a", .data = &sja1110a_info },
3808         { .compatible = "nxp,sja1110b", .data = &sja1110b_info },
3809         { .compatible = "nxp,sja1110c", .data = &sja1110c_info },
3810         { .compatible = "nxp,sja1110d", .data = &sja1110d_info },
3811         { /* sentinel */ },
3812 };
3813 MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
3814
3815 static struct spi_driver sja1105_driver = {
3816         .driver = {
3817                 .name  = "sja1105",
3818                 .owner = THIS_MODULE,
3819                 .of_match_table = of_match_ptr(sja1105_dt_ids),
3820         },
3821         .probe  = sja1105_probe,
3822         .remove = sja1105_remove,
3823 };
3824
3825 module_spi_driver(sja1105_driver);
3826
3827 MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
3828 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
3829 MODULE_DESCRIPTION("SJA1105 Driver");
3830 MODULE_LICENSE("GPL v2");