net: dsa: sja1105: register the MDIO buses for 100base-T1 and 100base-TX
drivers/net/dsa/sja1105/sja1105_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/delay.h>
9 #include <linux/module.h>
10 #include <linux/printk.h>
11 #include <linux/spi/spi.h>
12 #include <linux/errno.h>
13 #include <linux/gpio/consumer.h>
14 #include <linux/phylink.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <linux/of_mdio.h>
18 #include <linux/of_device.h>
19 #include <linux/netdev_features.h>
20 #include <linux/netdevice.h>
21 #include <linux/if_bridge.h>
22 #include <linux/if_ether.h>
23 #include <linux/dsa/8021q.h>
24 #include "sja1105.h"
25 #include "sja1105_sgmii.h"
26 #include "sja1105_tas.h"
27
28 #define SJA1105_UNKNOWN_MULTICAST       0x010000000000ull
29 #define SJA1105_DEFAULT_VLAN            (VLAN_N_VID - 1)
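/* SJA1105_UNKNOWN_MULTICAST has only bit 40 set, which is the multicast (I/G)
 * bit of the first MAC octet once an address is packed into a u64, and
 * SJA1105_DEFAULT_VLAN evaluates to 4095, the highest standard VLAN ID.
 */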
30
31 static const struct dsa_switch_ops sja1105_switch_ops;
32
33 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
34                              unsigned int startup_delay)
35 {
36         gpiod_set_value_cansleep(gpio, 1);
37         /* Wait for minimum reset pulse length */
38         msleep(pulse_len);
39         gpiod_set_value_cansleep(gpio, 0);
40         /* Wait until chip is ready after reset */
41         msleep(startup_delay);
42 }
43
44 static void
45 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
46                            int from, int to, bool allow)
47 {
48         if (allow)
49                 l2_fwd[from].reach_port |= BIT(to);
50         else
51                 l2_fwd[from].reach_port &= ~BIT(to);
52 }
53
54 static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
55                                 int from, int to)
56 {
57         return !!(l2_fwd[from].reach_port & BIT(to));
58 }
59
60 static int sja1105_init_mac_settings(struct sja1105_private *priv)
61 {
62         struct sja1105_mac_config_entry default_mac = {
63                 /* Enable all 8 priority queues on egress.
64                  * Every queue i holds top[i] - base[i] frames.
65                  * Sum of top[i] - base[i] is 511 (max hardware limit).
66                  */
67                 .top  = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
68                 .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
69                 .enabled = {true, true, true, true, true, true, true, true},
70                 /* Keep standard IFG of 12 bytes on egress. */
71                 .ifg = 0,
72                 /* Always put the MAC speed in automatic mode, where it can be
73                  * adjusted at runtime by PHYLINK.
74                  */
75                 .speed = priv->info->port_speed[SJA1105_SPEED_AUTO],
76                 /* No static correction for 1-step 1588 events */
77                 .tp_delin = 0,
78                 .tp_delout = 0,
79                 /* Disable aging for critical TTEthernet traffic */
80                 .maxage = 0xFF,
81                 /* Internal VLAN (pvid) to apply to untagged ingress */
82                 .vlanprio = 0,
83                 .vlanid = 1,
84                 .ing_mirr = false,
85                 .egr_mirr = false,
86                 /* Don't drop traffic with other EtherType than ETH_P_IP */
87                 .drpnona664 = false,
88                 /* Don't drop double-tagged traffic */
89                 .drpdtag = false,
90                 /* Don't drop untagged traffic */
91                 .drpuntag = false,
92                 /* Don't retag 802.1p (VID 0) traffic with the pvid */
93                 .retag = false,
94                 /* Disable learning and I/O on user ports by default -
95                  * STP will enable it.
96                  */
97                 .dyn_learn = false,
98                 .egress = false,
99                 .ingress = false,
100         };
101         struct sja1105_mac_config_entry *mac;
102         struct dsa_switch *ds = priv->ds;
103         struct sja1105_table *table;
104         int i;
105
106         table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
107
108         /* Discard previous MAC Configuration Table */
109         if (table->entry_count) {
110                 kfree(table->entries);
111                 table->entry_count = 0;
112         }
113
114         table->entries = kcalloc(table->ops->max_entry_count,
115                                  table->ops->unpacked_entry_size, GFP_KERNEL);
116         if (!table->entries)
117                 return -ENOMEM;
118
119         table->entry_count = table->ops->max_entry_count;
120
121         mac = table->entries;
122
123         for (i = 0; i < ds->num_ports; i++) {
124                 mac[i] = default_mac;
125                 if (i == dsa_upstream_port(priv->ds, i)) {
126                         /* STP doesn't get called for CPU port, so we need to
127                          * set the I/O parameters statically.
128                          */
129                         mac[i].dyn_learn = true;
130                         mac[i].ingress = true;
131                         mac[i].egress = true;
132                 }
133         }
134
135         return 0;
136 }
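/* The top[]/base[] values in default_mac above carve the 0x000-0x1FF range
 * into eight equal 64-slot slices, one per priority queue, so every egress
 * queue gets the same share of the port's queue space.
 */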
137
138 static int sja1105_init_mii_settings(struct sja1105_private *priv)
139 {
140         struct device *dev = &priv->spidev->dev;
141         struct sja1105_xmii_params_entry *mii;
142         struct dsa_switch *ds = priv->ds;
143         struct sja1105_table *table;
144         int i;
145
146         table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
147
148         /* Discard previous xMII Mode Parameters Table */
149         if (table->entry_count) {
150                 kfree(table->entries);
151                 table->entry_count = 0;
152         }
153
154         table->entries = kcalloc(table->ops->max_entry_count,
155                                  table->ops->unpacked_entry_size, GFP_KERNEL);
156         if (!table->entries)
157                 return -ENOMEM;
158
159         /* Override table based on PHYLINK DT bindings */
160         table->entry_count = table->ops->max_entry_count;
161
162         mii = table->entries;
163
164         for (i = 0; i < ds->num_ports; i++) {
165                 sja1105_mii_role_t role = XMII_MAC;
166
167                 if (dsa_is_unused_port(priv->ds, i))
168                         continue;
169
170                 switch (priv->phy_mode[i]) {
171                 case PHY_INTERFACE_MODE_INTERNAL:
172                         if (priv->info->internal_phy[i] == SJA1105_NO_PHY)
173                                 goto unsupported;
174
175                         mii->xmii_mode[i] = XMII_MODE_MII;
176                         if (priv->info->internal_phy[i] == SJA1105_PHY_BASE_TX)
177                                 mii->special[i] = true;
178
179                         break;
180                 case PHY_INTERFACE_MODE_REVMII:
181                         role = XMII_PHY;
182                         fallthrough;
183                 case PHY_INTERFACE_MODE_MII:
184                         if (!priv->info->supports_mii[i])
185                                 goto unsupported;
186
187                         mii->xmii_mode[i] = XMII_MODE_MII;
188                         break;
189                 case PHY_INTERFACE_MODE_REVRMII:
190                         role = XMII_PHY;
191                         fallthrough;
192                 case PHY_INTERFACE_MODE_RMII:
193                         if (!priv->info->supports_rmii[i])
194                                 goto unsupported;
195
196                         mii->xmii_mode[i] = XMII_MODE_RMII;
197                         break;
198                 case PHY_INTERFACE_MODE_RGMII:
199                 case PHY_INTERFACE_MODE_RGMII_ID:
200                 case PHY_INTERFACE_MODE_RGMII_RXID:
201                 case PHY_INTERFACE_MODE_RGMII_TXID:
202                         if (!priv->info->supports_rgmii[i])
203                                 goto unsupported;
204
205                         mii->xmii_mode[i] = XMII_MODE_RGMII;
206                         break;
207                 case PHY_INTERFACE_MODE_SGMII:
208                         if (!priv->info->supports_sgmii[i])
209                                 goto unsupported;
210
211                         mii->xmii_mode[i] = XMII_MODE_SGMII;
212                         break;
213                 case PHY_INTERFACE_MODE_2500BASEX:
214                         if (!priv->info->supports_2500basex[i])
215                                 goto unsupported;
216
217                         mii->xmii_mode[i] = XMII_MODE_SGMII;
218                         break;
219 unsupported:
220                 default:
221                         dev_err(dev, "Unsupported PHY mode %s on port %d!\n",
222                                 phy_modes(priv->phy_mode[i]), i);
223                         return -EINVAL;
224                 }
225
226                 mii->phy_mac[i] = role;
227         }
228         return 0;
229 }
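/* In the "reverse" MII/RMII modes above the switch port itself takes the PHY
 * role on the link (XMII_PHY), e.g. when wired MAC-to-MAC to another MAC.
 * Note also that 2500base-X has no dedicated xMII mode and reuses the SGMII
 * setting of the xMII Mode Parameters Table.
 */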
230
231 static int sja1105_init_static_fdb(struct sja1105_private *priv)
232 {
233         struct sja1105_l2_lookup_entry *l2_lookup;
234         struct sja1105_table *table;
235         int port;
236
237         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
238
239         /* We only populate the FDB table through dynamic L2 Address Lookup
240          * entries, except for a special entry at the end which is a catch-all
241          * for unknown multicast and will be used to control flooding domain.
242          */
243         if (table->entry_count) {
244                 kfree(table->entries);
245                 table->entry_count = 0;
246         }
247
248         if (!priv->info->can_limit_mcast_flood)
249                 return 0;
250
251         table->entries = kcalloc(1, table->ops->unpacked_entry_size,
252                                  GFP_KERNEL);
253         if (!table->entries)
254                 return -ENOMEM;
255
256         table->entry_count = 1;
257         l2_lookup = table->entries;
258
259         /* All L2 multicast addresses have an odd first octet */
260         l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
261         l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
262         l2_lookup[0].lockeds = true;
263         l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;
264
265         /* Flood multicast to every port by default */
266         for (port = 0; port < priv->ds->num_ports; port++)
267                 if (!dsa_is_unused_port(priv->ds, port))
268                         l2_lookup[0].destports |= BIT(port);
269
270         return 0;
271 }
272
273 static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
274 {
275         struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
276                 /* Learned FDB entries are forgotten after 300 seconds */
277                 .maxage = SJA1105_AGEING_TIME_MS(300000),
278                 /* All entries within a FDB bin are available for learning */
279                 .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
280                 /* And the P/Q/R/S equivalent setting: */
281                 .start_dynspc = 0,
282                 /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
283                 .poly = 0x97,
284                 /* This selects between Independent VLAN Learning (IVL) and
285                  * Shared VLAN Learning (SVL)
286                  */
287                 .shared_learn = true,
288                 /* Don't discard management traffic based on ENFPORT -
289                  * we don't perform SMAC port enforcement anyway, so
290                  * what we are setting here doesn't matter.
291                  */
292                 .no_enf_hostprt = false,
293                 /* Don't learn SMAC for mac_fltres1 and mac_fltres0.
294                  * Maybe correlate with no_linklocal_learn from bridge driver?
295                  */
296                 .no_mgmt_learn = true,
297                 /* P/Q/R/S only */
298                 .use_static = true,
299                 /* Dynamically learned FDB entries can overwrite other (older)
300                  * dynamic FDB entries
301                  */
302                 .owr_dyn = true,
303                 .drpnolearn = true,
304         };
305         struct dsa_switch *ds = priv->ds;
306         int port, num_used_ports = 0;
307         struct sja1105_table *table;
308         u64 max_fdb_entries;
309
310         for (port = 0; port < ds->num_ports; port++)
311                 if (!dsa_is_unused_port(ds, port))
312                         num_used_ports++;
313
314         max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / num_used_ports;
315
316         for (port = 0; port < ds->num_ports; port++) {
317                 if (dsa_is_unused_port(ds, port))
318                         continue;
319
320                 default_l2_lookup_params.maxaddrp[port] = max_fdb_entries;
321         }
322
323         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
324
325         if (table->entry_count) {
326                 kfree(table->entries);
327                 table->entry_count = 0;
328         }
329
330         table->entries = kcalloc(table->ops->max_entry_count,
331                                  table->ops->unpacked_entry_size, GFP_KERNEL);
332         if (!table->entries)
333                 return -ENOMEM;
334
335         table->entry_count = table->ops->max_entry_count;
336
337         /* This table only has a single entry */
338         ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
339                                 default_l2_lookup_params;
340
341         return 0;
342 }
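/* Worked example: with SJA1105_MAX_L2_LOOKUP_COUNT = 1024 and five used ports
 * (say four user ports plus the CPU port), max_fdb_entries above comes out to
 * 1024 / 5 = 204 addresses that each port may learn.
 */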
343
344 /* Set up a default VLAN for untagged traffic injected from the CPU
345  * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
346  * All DT-defined ports are members of this VLAN, and there are no
347  * restrictions on forwarding (since the CPU selects the destination).
348  * Frames from this VLAN will always be transmitted as untagged, and
349  * neither the bridge nor the 8021q module can create this VLAN ID.
350  */
351 static int sja1105_init_static_vlan(struct sja1105_private *priv)
352 {
353         struct sja1105_table *table;
354         struct sja1105_vlan_lookup_entry pvid = {
355                 .type_entry = SJA1110_VLAN_D_TAG,
356                 .ving_mirr = 0,
357                 .vegr_mirr = 0,
358                 .vmemb_port = 0,
359                 .vlan_bc = 0,
360                 .tag_port = 0,
361                 .vlanid = SJA1105_DEFAULT_VLAN,
362         };
363         struct dsa_switch *ds = priv->ds;
364         int port;
365
366         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
367
368         if (table->entry_count) {
369                 kfree(table->entries);
370                 table->entry_count = 0;
371         }
372
373         table->entries = kzalloc(table->ops->unpacked_entry_size,
374                                  GFP_KERNEL);
375         if (!table->entries)
376                 return -ENOMEM;
377
378         table->entry_count = 1;
379
380         for (port = 0; port < ds->num_ports; port++) {
381                 struct sja1105_bridge_vlan *v;
382
383                 if (dsa_is_unused_port(ds, port))
384                         continue;
385
386                 pvid.vmemb_port |= BIT(port);
387                 pvid.vlan_bc |= BIT(port);
388                 pvid.tag_port &= ~BIT(port);
389
390                 v = kzalloc(sizeof(*v), GFP_KERNEL);
391                 if (!v)
392                         return -ENOMEM;
393
394                 v->port = port;
395                 v->vid = SJA1105_DEFAULT_VLAN;
396                 v->untagged = true;
397                 if (dsa_is_cpu_port(ds, port))
398                         v->pvid = true;
399                 list_add(&v->list, &priv->dsa_8021q_vlans);
400         }
401
402         ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
403         return 0;
404 }
405
406 static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
407 {
408         struct sja1105_l2_forwarding_entry *l2fwd;
409         struct dsa_switch *ds = priv->ds;
410         struct sja1105_table *table;
411         int i, j;
412
413         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
414
415         if (table->entry_count) {
416                 kfree(table->entries);
417                 table->entry_count = 0;
418         }
419
420         table->entries = kcalloc(table->ops->max_entry_count,
421                                  table->ops->unpacked_entry_size, GFP_KERNEL);
422         if (!table->entries)
423                 return -ENOMEM;
424
425         table->entry_count = table->ops->max_entry_count;
426
427         l2fwd = table->entries;
428
429         /* The first ds->num_ports entries define the forwarding rules */
430         for (i = 0; i < ds->num_ports; i++) {
431                 unsigned int upstream = dsa_upstream_port(priv->ds, i);
432
433                 if (dsa_is_unused_port(ds, i))
434                         continue;
435
436                 for (j = 0; j < SJA1105_NUM_TC; j++)
437                         l2fwd[i].vlan_pmap[j] = j;
438
439                 /* All ports start up with egress flooding enabled,
440                  * including the CPU port.
441                  */
442                 priv->ucast_egress_floods |= BIT(i);
443                 priv->bcast_egress_floods |= BIT(i);
444
445                 if (i == upstream)
446                         continue;
447
448                 sja1105_port_allow_traffic(l2fwd, i, upstream, true);
449                 sja1105_port_allow_traffic(l2fwd, upstream, i, true);
450
451                 l2fwd[i].bc_domain = BIT(upstream);
452                 l2fwd[i].fl_domain = BIT(upstream);
453
454                 l2fwd[upstream].bc_domain |= BIT(i);
455                 l2fwd[upstream].fl_domain |= BIT(i);
456         }
457
458         /* Next 8 entries define VLAN PCP mapping from ingress to egress.
459          * Create a one-to-one mapping.
460          */
461         for (i = 0; i < SJA1105_NUM_TC; i++) {
462                 for (j = 0; j < ds->num_ports; j++) {
463                         if (dsa_is_unused_port(ds, j))
464                                 continue;
465
466                         l2fwd[ds->num_ports + i].vlan_pmap[j] = i;
467                 }
468
469                 l2fwd[ds->num_ports + i].type_egrpcp2outputq = true;
470         }
471
472         return 0;
473 }
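/* Resulting layout of the L2 Forwarding Table: entries 0..ds->num_ports - 1
 * hold the per-port reachability and flooding rules, and the following
 * SJA1105_NUM_TC entries hold the VLAN PCP mapping, i.e. entry
 * ds->num_ports + i is applied to ingress traffic classified to PCP i.
 */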
474
475 static int sja1110_init_pcp_remapping(struct sja1105_private *priv)
476 {
477         struct sja1110_pcp_remapping_entry *pcp_remap;
478         struct dsa_switch *ds = priv->ds;
479         struct sja1105_table *table;
480         int port, tc;
481
482         table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING];
483
484         /* Nothing to do for SJA1105 */
485         if (!table->ops->max_entry_count)
486                 return 0;
487
488         if (table->entry_count) {
489                 kfree(table->entries);
490                 table->entry_count = 0;
491         }
492
493         table->entries = kcalloc(table->ops->max_entry_count,
494                                  table->ops->unpacked_entry_size, GFP_KERNEL);
495         if (!table->entries)
496                 return -ENOMEM;
497
498         table->entry_count = table->ops->max_entry_count;
499
500         pcp_remap = table->entries;
501
502         /* Repeat the configuration done for vlan_pmap */
503         for (port = 0; port < ds->num_ports; port++) {
504                 if (dsa_is_unused_port(ds, port))
505                         continue;
506
507                 for (tc = 0; tc < SJA1105_NUM_TC; tc++)
508                         pcp_remap[port].egrpcp[tc] = tc;
509         }
510
511         return 0;
512 }
513
514 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
515 {
516         struct sja1105_l2_forwarding_params_entry *l2fwd_params;
517         struct sja1105_table *table;
518
519         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
520
521         if (table->entry_count) {
522                 kfree(table->entries);
523                 table->entry_count = 0;
524         }
525
526         table->entries = kcalloc(table->ops->max_entry_count,
527                                  table->ops->unpacked_entry_size, GFP_KERNEL);
528         if (!table->entries)
529                 return -ENOMEM;
530
531         table->entry_count = table->ops->max_entry_count;
532
533         /* This table only has a single entry */
534         l2fwd_params = table->entries;
535
536         /* Disallow dynamic reconfiguration of vlan_pmap */
537         l2fwd_params->max_dynp = 0;
538         /* Use a single memory partition for all ingress queues */
539         l2fwd_params->part_spc[0] = priv->info->max_frame_mem;
540
541         return 0;
542 }
543
544 void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
545 {
546         struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
547         struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
548         int max_mem = priv->info->max_frame_mem;
549         struct sja1105_table *table;
550
551         /* VLAN retagging is implemented using a loopback port that consumes
552          * frame buffers. That leaves less for us.
553          */
554         if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
555                 max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
556
557         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
558         l2_fwd_params = table->entries;
559         l2_fwd_params->part_spc[0] = max_mem;
560
561         /* If we have any critical-traffic virtual links, we need to reserve
562          * some frame buffer memory for them. At the moment, hardcode the value
563          * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
564          * remaining for best-effort traffic. TODO: figure out a more flexible
565          * way to perform the frame buffer partitioning.
566          */
567         if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
568                 return;
569
570         table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
571         vl_fwd_params = table->entries;
572
573         l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
574         vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
575 }
576
577 /* SJA1110 TDMACONFIGIDX values:
578  *
579  *      | 100 Mbps ports |  1Gbps ports  | 2.5Gbps ports | Disabled ports
580  * -----+----------------+---------------+---------------+---------------
581  *   0  |   0, [5:10]    |     [1:2]     |     [3:4]     |     retag
582  *   1  |0, [5:10], retag|     [1:2]     |     [3:4]     |       -
583  *   2  |   0, [5:10]    |  [1:3], retag |       4       |       -
584  *   3  |   0, [5:10]    |[1:2], 4, retag|       3       |       -
585  *   4  |  0, 2, [5:10]  |    1, retag   |     [3:4]     |       -
586  *   5  |  0, 1, [5:10]  |    2, retag   |     [3:4]     |       -
587  *  14  |   0, [5:10]    | [1:4], retag  |       -       |       -
588  *  15  |     [5:10]     | [0:4], retag  |       -       |       -
589  */
590 static void sja1110_select_tdmaconfigidx(struct sja1105_private *priv)
591 {
592         struct sja1105_general_params_entry *general_params;
593         struct sja1105_table *table;
594         bool port_1_is_base_tx;
595         bool port_3_is_2500;
596         bool port_4_is_2500;
597         u64 tdmaconfigidx;
598
599         if (priv->info->device_id != SJA1110_DEVICE_ID)
600                 return;
601
602         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
603         general_params = table->entries;
604
605         /* All the settings below are "as opposed to SGMII", which is the
606          * other pinmuxing option.
607          */
608         port_1_is_base_tx = priv->phy_mode[1] == PHY_INTERFACE_MODE_INTERNAL;
609         port_3_is_2500 = priv->phy_mode[3] == PHY_INTERFACE_MODE_2500BASEX;
610         port_4_is_2500 = priv->phy_mode[4] == PHY_INTERFACE_MODE_2500BASEX;
611
612         if (port_1_is_base_tx)
613                 /* Retagging port will operate at 1 Gbps */
614                 tdmaconfigidx = 5;
615         else if (port_3_is_2500 && port_4_is_2500)
616                 /* Retagging port will operate at 100 Mbps */
617                 tdmaconfigidx = 1;
618         else if (port_3_is_2500)
619                 /* Retagging port will operate at 1 Gbps */
620                 tdmaconfigidx = 3;
621         else if (port_4_is_2500)
622                 /* Retagging port will operate at 1 Gbps */
623                 tdmaconfigidx = 2;
624         else
625                 /* Retagging port will operate at 1 Gbps */
626                 tdmaconfigidx = 14;
627
628         general_params->tdmaconfigidx = tdmaconfigidx;
629 }
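/* The value chosen above selects one row of the TDMACONFIGIDX table in the
 * comment preceding this function: e.g. the default of 14 corresponds to
 * ports 1-4 at 1 Gbps and ports 0, 5-10 at 100 Mbps, while 5 is used when
 * port 1 is muxed to its internal 100base-TX PHY.
 */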
630
631 static int sja1105_init_general_params(struct sja1105_private *priv)
632 {
633         struct sja1105_general_params_entry default_general_params = {
634                 /* Allow dynamic changing of the mirror port */
635                 .mirr_ptacu = true,
636                 .switchid = priv->ds->index,
637                 /* Priority queue for link-local management frames
638                  * (both ingress to and egress from CPU - PTP, STP etc)
639                  */
640                 .hostprio = 7,
641                 .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
642                 .mac_flt1    = SJA1105_LINKLOCAL_FILTER_A_MASK,
643                 .incl_srcpt1 = false,
644                 .send_meta1  = false,
645                 .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
646                 .mac_flt0    = SJA1105_LINKLOCAL_FILTER_B_MASK,
647                 .incl_srcpt0 = false,
648                 .send_meta0  = false,
649                 /* The destination for traffic matching mac_fltres1 and
650                  * mac_fltres0 on all ports except host_port. Such traffic
651                  * received on host_port itself would be dropped, except
652                  * by installing a temporary 'management route'
653                  */
654                 .host_port = priv->ds->num_ports,
655                 /* Default to an invalid value */
656                 .mirr_port = priv->ds->num_ports,
657                 /* Link-local traffic received on casc_port will be forwarded
658                  * to host_port without embedding the source port and device ID
659                  * info in the destination MAC address (presumably because it
660                  * is a cascaded port and a downstream SJA switch already did
661                  * that). Default to an invalid port (to disable the feature)
662                  * and overwrite this if we find any DSA (cascaded) ports.
663                  */
664                 .casc_port = priv->ds->num_ports,
665                 /* No TTEthernet */
666                 .vllupformat = SJA1105_VL_FORMAT_PSFP,
667                 .vlmarker = 0,
668                 .vlmask = 0,
669                 /* Only update correctionField for 1-step PTP (L2 transport) */
670                 .ignore2stf = 0,
671                 /* Forcefully disable VLAN filtering by telling
672                  * the switch that VLAN has a different EtherType.
673                  */
674                 .tpid = ETH_P_SJA1105,
675                 .tpid2 = ETH_P_SJA1105,
676         };
677         struct dsa_switch *ds = priv->ds;
678         struct sja1105_table *table;
679         int port;
680
681         for (port = 0; port < ds->num_ports; port++) {
682                 if (dsa_is_cpu_port(ds, port)) {
683                         default_general_params.host_port = port;
684                         break;
685                 }
686         }
687
688         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
689
690         if (table->entry_count) {
691                 kfree(table->entries);
692                 table->entry_count = 0;
693         }
694
695         table->entries = kcalloc(table->ops->max_entry_count,
696                                  table->ops->unpacked_entry_size, GFP_KERNEL);
697         if (!table->entries)
698                 return -ENOMEM;
699
700         table->entry_count = table->ops->max_entry_count;
701
702         /* This table only has a single entry */
703         ((struct sja1105_general_params_entry *)table->entries)[0] =
704                                 default_general_params;
705
706         sja1110_select_tdmaconfigidx(priv);
707
708         return 0;
709 }
710
711 static int sja1105_init_avb_params(struct sja1105_private *priv)
712 {
713         struct sja1105_avb_params_entry *avb;
714         struct sja1105_table *table;
715
716         table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
717
718         /* Discard previous AVB Parameters Table */
719         if (table->entry_count) {
720                 kfree(table->entries);
721                 table->entry_count = 0;
722         }
723
724         table->entries = kcalloc(table->ops->max_entry_count,
725                                  table->ops->unpacked_entry_size, GFP_KERNEL);
726         if (!table->entries)
727                 return -ENOMEM;
728
729         table->entry_count = table->ops->max_entry_count;
730
731         avb = table->entries;
732
733         /* Configure the MAC addresses for meta frames */
734         avb->destmeta = SJA1105_META_DMAC;
735         avb->srcmeta  = SJA1105_META_SMAC;
736         /* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
737          * default. This is because there might be boards with a hardware
738          * layout where enabling the pin as output might cause an electrical
739          * clash. On E/T the pin is always an output, which the board designers
740          * probably already knew, so even if there are going to be electrical
741          * issues, there's nothing we can do.
742          */
743         avb->cas_master = false;
744
745         return 0;
746 }
747
748 /* The L2 policing table is 2-stage. The table is looked up for each frame
749  * according to the ingress port, whether it was broadcast or not, and the
750  * classified traffic class (given by VLAN PCP). This portion of the lookup is
751  * fixed, and gives access to the SHARINDX, an indirection register pointing
752  * within the policing table itself, which is used to resolve the policer that
753  * will be used for this frame.
754  *
755  *  Stage 1                              Stage 2
756  * +------------+--------+              +---------------------------------+
757  * |Port 0 TC 0 |SHARINDX|              | Policer 0: Rate, Burst, MTU     |
758  * +------------+--------+              +---------------------------------+
759  * |Port 0 TC 1 |SHARINDX|              | Policer 1: Rate, Burst, MTU     |
760  * +------------+--------+              +---------------------------------+
761  *    ...                               | Policer 2: Rate, Burst, MTU     |
762  * +------------+--------+              +---------------------------------+
763  * |Port 0 TC 7 |SHARINDX|              | Policer 3: Rate, Burst, MTU     |
764  * +------------+--------+              +---------------------------------+
765  * |Port 1 TC 0 |SHARINDX|              | Policer 4: Rate, Burst, MTU     |
766  * +------------+--------+              +---------------------------------+
767  *    ...                               | Policer 5: Rate, Burst, MTU     |
768  * +------------+--------+              +---------------------------------+
769  * |Port 1 TC 7 |SHARINDX|              | Policer 6: Rate, Burst, MTU     |
770  * +------------+--------+              +---------------------------------+
771  *    ...                               | Policer 7: Rate, Burst, MTU     |
772  * +------------+--------+              +---------------------------------+
773  * |Port 4 TC 7 |SHARINDX|                 ...
774  * +------------+--------+
775  * |Port 0 BCAST|SHARINDX|                 ...
776  * +------------+--------+
777  * |Port 1 BCAST|SHARINDX|                 ...
778  * +------------+--------+
779  *    ...                                  ...
780  * +------------+--------+              +---------------------------------+
781  * |Port 4 BCAST|SHARINDX|              | Policer 44: Rate, Burst, MTU    |
782  * +------------+--------+              +---------------------------------+
783  *
784  * In this driver, we shall use policers 0-4 as statically allocated port
785  * (matchall) policers. So we need to make the SHARINDX for all lookups
786  * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
787  * lookup) equal.
788  * The remaining policers (40) shall be dynamically allocated for flower
789  * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
790  */
791 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
792
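/* Worked example for sja1105_init_l2_policing() below, for an n-port switch
 * with 8 traffic classes: the stage-1 rows are indexed as port * 8 + tc for
 * the per-TC lookups, n * 8 + port for the broadcast lookups, and (SJA1110
 * only) n * 9 + port for the multicast lookups. The RATE field is written in
 * SJA1105_RATE_MBPS() units of 1000 / 64 kbps = 15.625 kbps, so
 * SJA1105_RATE_MBPS(1000) = 64000.
 */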
793 static int sja1105_init_l2_policing(struct sja1105_private *priv)
794 {
795         struct sja1105_l2_policing_entry *policing;
796         struct dsa_switch *ds = priv->ds;
797         struct sja1105_table *table;
798         int port, tc;
799
800         table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
801
802         /* Discard previous L2 Policing Table */
803         if (table->entry_count) {
804                 kfree(table->entries);
805                 table->entry_count = 0;
806         }
807
808         table->entries = kcalloc(table->ops->max_entry_count,
809                                  table->ops->unpacked_entry_size, GFP_KERNEL);
810         if (!table->entries)
811                 return -ENOMEM;
812
813         table->entry_count = table->ops->max_entry_count;
814
815         policing = table->entries;
816
817         /* Setup shared indices for the matchall policers */
818         for (port = 0; port < ds->num_ports; port++) {
819                 int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port;
820                 int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
821
822                 for (tc = 0; tc < SJA1105_NUM_TC; tc++)
823                         policing[port * SJA1105_NUM_TC + tc].sharindx = port;
824
825                 policing[bcast].sharindx = port;
826                 /* Only SJA1110 has multicast policers */
827                 if (mcast < table->ops->max_entry_count)
828                         policing[mcast].sharindx = port;
829         }
830
831         /* Setup the matchall policer parameters */
832         for (port = 0; port < ds->num_ports; port++) {
833                 int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
834
835                 if (dsa_is_cpu_port(priv->ds, port))
836                         mtu += VLAN_HLEN;
837
838                 policing[port].smax = 65535; /* Burst size in bytes */
839                 policing[port].rate = SJA1105_RATE_MBPS(1000);
840                 policing[port].maxlen = mtu;
841                 policing[port].partition = 0;
842         }
843
844         return 0;
845 }
846
847 static int sja1105_static_config_load(struct sja1105_private *priv)
848 {
849         int rc;
850
851         sja1105_static_config_free(&priv->static_config);
852         rc = sja1105_static_config_init(&priv->static_config,
853                                         priv->info->static_ops,
854                                         priv->info->device_id);
855         if (rc)
856                 return rc;
857
858         /* Build static configuration */
859         rc = sja1105_init_mac_settings(priv);
860         if (rc < 0)
861                 return rc;
862         rc = sja1105_init_mii_settings(priv);
863         if (rc < 0)
864                 return rc;
865         rc = sja1105_init_static_fdb(priv);
866         if (rc < 0)
867                 return rc;
868         rc = sja1105_init_static_vlan(priv);
869         if (rc < 0)
870                 return rc;
871         rc = sja1105_init_l2_lookup_params(priv);
872         if (rc < 0)
873                 return rc;
874         rc = sja1105_init_l2_forwarding(priv);
875         if (rc < 0)
876                 return rc;
877         rc = sja1105_init_l2_forwarding_params(priv);
878         if (rc < 0)
879                 return rc;
880         rc = sja1105_init_l2_policing(priv);
881         if (rc < 0)
882                 return rc;
883         rc = sja1105_init_general_params(priv);
884         if (rc < 0)
885                 return rc;
886         rc = sja1105_init_avb_params(priv);
887         if (rc < 0)
888                 return rc;
889         rc = sja1110_init_pcp_remapping(priv);
890         if (rc < 0)
891                 return rc;
892
893         /* Send initial configuration to hardware via SPI */
894         return sja1105_static_config_upload(priv);
895 }
896
897 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv)
898 {
899         struct dsa_switch *ds = priv->ds;
900         int port;
901
902         for (port = 0; port < ds->num_ports; port++) {
903                 if (!priv->fixed_link[port])
904                         continue;
905
906                 if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID ||
907                     priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
908                         priv->rgmii_rx_delay[port] = true;
909
910                 if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID ||
911                     priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
912                         priv->rgmii_tx_delay[port] = true;
913
914                 if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) &&
915                     !priv->info->setup_rgmii_delay)
916                         return -EINVAL;
917         }
918         return 0;
919 }
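/* RGMII delays are only acted upon for fixed-link ports above; when a PHY is
 * attached, the rgmii-*id modes are presumably honoured by the PHY applying
 * its own internal delays, so the switch-side clocking is left untouched.
 */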
920
921 static int sja1105_parse_ports_node(struct sja1105_private *priv,
922                                     struct device_node *ports_node)
923 {
924         struct device *dev = &priv->spidev->dev;
925         struct device_node *child;
926
927         for_each_available_child_of_node(ports_node, child) {
928                 struct device_node *phy_node;
929                 phy_interface_t phy_mode;
930                 u32 index;
931                 int err;
932
933                 /* Get switch port number from DT */
934                 if (of_property_read_u32(child, "reg", &index) < 0) {
935                         dev_err(dev, "Port number not defined in device tree "
936                                 "(property \"reg\")\n");
937                         of_node_put(child);
938                         return -ENODEV;
939                 }
940
941                 /* Get PHY mode from DT */
942                 err = of_get_phy_mode(child, &phy_mode);
943                 if (err) {
944                         dev_err(dev, "Failed to read phy-mode or "
945                                 "phy-interface-type property for port %d\n",
946                                 index);
947                         of_node_put(child);
948                         return -ENODEV;
949                 }
950
951                 phy_node = of_parse_phandle(child, "phy-handle", 0);
952                 if (!phy_node) {
953                         if (!of_phy_is_fixed_link(child)) {
954                                 dev_err(dev, "phy-handle or fixed-link "
955                                         "properties missing!\n");
956                                 of_node_put(child);
957                                 return -ENODEV;
958                         }
959                         /* phy-handle is missing, but fixed-link isn't.
960                          * So it's a fixed link. Default to PHY role.
961                          */
962                         priv->fixed_link[index] = true;
963                 } else {
964                         of_node_put(phy_node);
965                 }
966
967                 priv->phy_mode[index] = phy_mode;
968         }
969
970         return 0;
971 }
972
973 static int sja1105_parse_dt(struct sja1105_private *priv)
974 {
975         struct device *dev = &priv->spidev->dev;
976         struct device_node *switch_node = dev->of_node;
977         struct device_node *ports_node;
978         int rc;
979
980         ports_node = of_get_child_by_name(switch_node, "ports");
981         if (!ports_node)
982                 ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
983         if (!ports_node) {
984                 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
985                 return -ENODEV;
986         }
987
988         rc = sja1105_parse_ports_node(priv, ports_node);
989         of_node_put(ports_node);
990
991         return rc;
992 }
993
994 static int sja1105_sgmii_read(struct sja1105_private *priv, int port, int mmd,
995                               int pcs_reg)
996 {
997         u64 addr = (mmd << 16) | pcs_reg;
998         u32 val;
999         int rc;
1000
1001         if (port != SJA1105_SGMII_PORT)
1002                 return -ENODEV;
1003
1004         rc = sja1105_xfer_u32(priv, SPI_READ, addr, &val, NULL);
1005         if (rc < 0)
1006                 return rc;
1007
1008         return val;
1009 }
1010
1011 static int sja1105_sgmii_write(struct sja1105_private *priv, int port, int mmd,
1012                                int pcs_reg, u16 pcs_val)
1013 {
1014         u64 addr = (mmd << 16) | pcs_reg;
1015         u32 val = pcs_val;
1016         int rc;
1017
1018         if (port != SJA1105_SGMII_PORT)
1019                 return -ENODEV;
1020
1021         rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &val, NULL);
1022         if (rc < 0)
1023                 return rc;
1024
1025         return val;
1026 }
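/* Both SGMII PCS accessors above tunnel the Clause 45-style (MMD, register)
 * pair over SPI: the address is formed as (mmd << 16) | pcs_reg and handed to
 * the generic sja1105_xfer_u32() helper.
 */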
1027
1028 static void sja1105_sgmii_pcs_config(struct sja1105_private *priv, int port,
1029                                      bool an_enabled, bool an_master)
1030 {
1031         u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;
1032
1033         /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
1034          * stop the clock during LPI mode, make the MAC reconfigure
1035          * autonomously after PCS autoneg is done, flush the internal FIFOs.
1036          */
1037         sja1105_sgmii_write(priv, port, MDIO_MMD_VEND2, SJA1105_DC1,
1038                             SJA1105_DC1_EN_VSMMD1 |
1039                             SJA1105_DC1_CLOCK_STOP_EN |
1040                             SJA1105_DC1_MAC_AUTO_SW |
1041                             SJA1105_DC1_INIT);
1042         /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
1043         sja1105_sgmii_write(priv, port, MDIO_MMD_VEND2, SJA1105_DC2,
1044                             SJA1105_DC2_TX_POL_INV_DISABLE);
1045         /* AUTONEG_CONTROL: Use SGMII autoneg */
1046         if (an_master)
1047                 ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
1048         sja1105_sgmii_write(priv, port, MDIO_MMD_VEND2, SJA1105_AC, ac);
1049         /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
1050          * sja1105_sgmii_pcs_force_speed must be called later for the link
1051          * to become operational.
1052          */
1053         if (an_enabled)
1054                 sja1105_sgmii_write(priv, port, MDIO_MMD_VEND2, MDIO_CTRL1,
1055                                     BMCR_ANENABLE | BMCR_ANRESTART);
1056 }
1057
1058 static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
1059                                           int port, int speed)
1060 {
1061         int pcs_speed;
1062
1063         switch (speed) {
1064         case SPEED_1000:
1065                 pcs_speed = BMCR_SPEED1000;
1066                 break;
1067         case SPEED_100:
1068                 pcs_speed = BMCR_SPEED100;
1069                 break;
1070         case SPEED_10:
1071                 pcs_speed = BMCR_SPEED10;
1072                 break;
1073         default:
1074                 dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
1075                 return;
1076         }
1077         sja1105_sgmii_write(priv, port, MDIO_MMD_VEND2, MDIO_CTRL1,
1078                             pcs_speed | BMCR_FULLDPLX);
1079 }
1080
1081 /* Convert link speed from SJA1105 to ethtool encoding */
1082 static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv,
1083                                          u64 speed)
1084 {
1085         if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS])
1086                 return SPEED_10;
1087         if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS])
1088                 return SPEED_100;
1089         if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS])
1090                 return SPEED_1000;
1091         if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS])
1092                 return SPEED_2500;
1093         return SPEED_UNKNOWN;
1094 }
1095
1096 /* Set link speed in the MAC configuration for a specific port. */
1097 static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
1098                                       int speed_mbps)
1099 {
1100         struct sja1105_mac_config_entry *mac;
1101         struct device *dev = priv->ds->dev;
1102         u64 speed;
1103         int rc;
1104
1105         /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
1106          * tables. On E/T, MAC reconfig tables are not readable, only writable.
1107          * We have to *know* what the MAC looks like.  For the sake of keeping
1108          * the code common, we'll use the static configuration tables as a
1109          * reasonable approximation for both E/T and P/Q/R/S.
1110          */
1111         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1112
1113         switch (speed_mbps) {
1114         case SPEED_UNKNOWN:
1115                 /* PHYLINK called sja1105_mac_config() to inform us about
1116                  * the state->interface, but AN has not completed and the
1117                  * speed is not yet valid. UM10944.pdf says that setting
1118                  * SJA1105_SPEED_AUTO at runtime disables the port, so that is
1119                  * ok for power consumption in case AN will never complete -
1120                  * otherwise PHYLINK should come back with a new update.
1121                  */
1122                 speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
1123                 break;
1124         case SPEED_10:
1125                 speed = priv->info->port_speed[SJA1105_SPEED_10MBPS];
1126                 break;
1127         case SPEED_100:
1128                 speed = priv->info->port_speed[SJA1105_SPEED_100MBPS];
1129                 break;
1130         case SPEED_1000:
1131                 speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
1132                 break;
1133         default:
1134                 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
1135                 return -EINVAL;
1136         }
1137
1138         /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
1139          * table, since this will be used for the clocking setup, and we no
1140          * longer need to store it in the static config (already told hardware
1141          * we want auto during upload phase).
1142          * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
1143          * we need to configure the PCS only (if even that).
1144          */
1145         if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
1146                 mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
1147         else
1148                 mac[port].speed = speed;
1149
1150         /* Write to the dynamic reconfiguration tables */
1151         rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1152                                           &mac[port], true);
1153         if (rc < 0) {
1154                 dev_err(dev, "Failed to write MAC config: %d\n", rc);
1155                 return rc;
1156         }
1157
1158         /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
1159          * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
1160          * RMII no change of the clock setup is required. Actually, changing
1161          * the clock setup does interrupt the clock signal for a certain time
1162          * which causes trouble for all PHYs relying on this signal.
1163          */
1164         if (!phy_interface_mode_is_rgmii(priv->phy_mode[port]))
1165                 return 0;
1166
1167         return sja1105_clocking_setup_port(priv, port);
1168 }
1169
1170 /* The SJA1105 MAC programming model is through the static config (the xMII
1171  * Mode table cannot be dynamically reconfigured), and we have to program
1172  * that early (earlier than PHYLINK calls us, anyway).
1173  * So just error out in case the connected PHY attempts to change the initial
1174  * system interface MII protocol from what is defined in the DT, at least for
1175  * now.
1176  */
1177 static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
1178                                       phy_interface_t interface)
1179 {
1180         return priv->phy_mode[port] != interface;
1181 }
1182
1183 static void sja1105_mac_config(struct dsa_switch *ds, int port,
1184                                unsigned int mode,
1185                                const struct phylink_link_state *state)
1186 {
1187         struct sja1105_private *priv = ds->priv;
1188         bool is_sgmii;
1189
1190         is_sgmii = (state->interface == PHY_INTERFACE_MODE_SGMII);
1191
1192         if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
1193                 dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
1194                         phy_modes(state->interface));
1195                 return;
1196         }
1197
1198         if (phylink_autoneg_inband(mode) && !is_sgmii) {
1199                 dev_err(ds->dev, "In-band AN not supported!\n");
1200                 return;
1201         }
1202
1203         if (is_sgmii)
1204                 sja1105_sgmii_pcs_config(priv, port,
1205                                          phylink_autoneg_inband(mode),
1206                                          false);
1207 }
1208
1209 static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
1210                                   unsigned int mode,
1211                                   phy_interface_t interface)
1212 {
1213         sja1105_inhibit_tx(ds->priv, BIT(port), true);
1214 }
1215
1216 static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
1217                                 unsigned int mode,
1218                                 phy_interface_t interface,
1219                                 struct phy_device *phydev,
1220                                 int speed, int duplex,
1221                                 bool tx_pause, bool rx_pause)
1222 {
1223         struct sja1105_private *priv = ds->priv;
1224
1225         sja1105_adjust_port_config(priv, port, speed);
1226
1227         if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII &&
1228             !phylink_autoneg_inband(mode))
1229                 sja1105_sgmii_pcs_force_speed(priv, port, speed);
1230
1231         sja1105_inhibit_tx(priv, BIT(port), false);
1232 }
1233
1234 static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
1235                                      unsigned long *supported,
1236                                      struct phylink_link_state *state)
1237 {
1238         /* Construct a new mask which exhaustively contains all link features
1239          * supported by the MAC, and then apply that (logical AND) to what will
1240          * be sent to the PHY for "marketing".
1241          */
1242         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1243         struct sja1105_private *priv = ds->priv;
1244         struct sja1105_xmii_params_entry *mii;
1245
1246         mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
1247
1248         /* include/linux/phylink.h says:
1249          *     When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
1250          *     expects the MAC driver to return all supported link modes.
1251          */
1252         if (state->interface != PHY_INTERFACE_MODE_NA &&
1253             sja1105_phy_mode_mismatch(priv, port, state->interface)) {
1254                 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1255                 return;
1256         }
1257
1258         /* The MAC does not support pause frames, and also doesn't
1259          * support half-duplex traffic modes.
1260          */
1261         phylink_set(mask, Autoneg);
1262         phylink_set(mask, MII);
1263         phylink_set(mask, 10baseT_Full);
1264         phylink_set(mask, 100baseT_Full);
1265         phylink_set(mask, 100baseT1_Full);
1266         if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
1267             mii->xmii_mode[port] == XMII_MODE_SGMII)
1268                 phylink_set(mask, 1000baseT_Full);
1269
1270         bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
1271         bitmap_and(state->advertising, state->advertising, mask,
1272                    __ETHTOOL_LINK_MODE_MASK_NBITS);
1273 }
1274
1275 static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
1276                                      struct phylink_link_state *state)
1277 {
1278         struct sja1105_private *priv = ds->priv;
1279         int ais;
1280
1281         /* Read the vendor-specific AUTONEG_INTR_STATUS register */
1282         ais = sja1105_sgmii_read(priv, port, MDIO_MMD_VEND2, SJA1105_AIS);
1283         if (ais < 0)
1284                 return ais;
1285
1286         switch (SJA1105_AIS_SPEED(ais)) {
1287         case 0:
1288                 state->speed = SPEED_10;
1289                 break;
1290         case 1:
1291                 state->speed = SPEED_100;
1292                 break;
1293         case 2:
1294                 state->speed = SPEED_1000;
1295                 break;
1296         default:
1297                 dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
1298                         SJA1105_AIS_SPEED(ais));
1299         }
1300         state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
1301         state->an_complete = SJA1105_AIS_COMPLETE(ais);
1302         state->link = SJA1105_AIS_LINK_STATUS(ais);
1303
1304         return 0;
1305 }
1306
1307 static int
1308 sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
1309                               const struct sja1105_l2_lookup_entry *requested)
1310 {
1311         struct sja1105_l2_lookup_entry *l2_lookup;
1312         struct sja1105_table *table;
1313         int i;
1314
1315         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1316         l2_lookup = table->entries;
1317
1318         for (i = 0; i < table->entry_count; i++)
1319                 if (l2_lookup[i].macaddr == requested->macaddr &&
1320                     l2_lookup[i].vlanid == requested->vlanid &&
1321                     l2_lookup[i].destports & BIT(port))
1322                         return i;
1323
1324         return -1;
1325 }
1326
1327 /* We want FDB entries added statically through the bridge command to persist
1328  * across switch resets, which are a common thing during normal SJA1105
1329  * operation. So we have to back them up in the static configuration tables
1330  * and hence apply them on next static config upload... yay!
1331  */
1332 static int
1333 sja1105_static_fdb_change(struct sja1105_private *priv, int port,
1334                           const struct sja1105_l2_lookup_entry *requested,
1335                           bool keep)
1336 {
1337         struct sja1105_l2_lookup_entry *l2_lookup;
1338         struct sja1105_table *table;
1339         int rc, match;
1340
1341         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1342
1343         match = sja1105_find_static_fdb_entry(priv, port, requested);
1344         if (match < 0) {
1345                 /* Can't delete a missing entry. */
1346                 if (!keep)
1347                         return 0;
1348
1349                 /* No match => new entry */
1350                 rc = sja1105_table_resize(table, table->entry_count + 1);
1351                 if (rc)
1352                         return rc;
1353
1354                 match = table->entry_count - 1;
1355         }
1356
1357         /* Assign pointer after the resize (it may be new memory) */
1358         l2_lookup = table->entries;
1359
1360         /* We have a match.
1361          * If the job was to add this FDB entry, it's already done (mostly
1362          * anyway, since the port forwarding mask may have changed, in which
1363          * case we update it).
1364          * Otherwise we have to delete it.
1365          */
1366         if (keep) {
1367                 l2_lookup[match] = *requested;
1368                 return 0;
1369         }
1370
1371         /* To remove, the strategy is to overwrite the element with
1372          * the last one, and then reduce the array size by 1
1373          */
1374         l2_lookup[match] = l2_lookup[table->entry_count - 1];
1375         return sja1105_table_resize(table, table->entry_count - 1);
1376 }
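/* Illustration of the delete path above: with 5 static entries (indices
 * 0..4), removing the entry at index 1 copies entry 4 over index 1 and
 * shrinks the table to 4 entries. Ordering is not preserved, which is fine
 * since lookups in this table go by {macaddr, vlanid, destports} anyway.
 */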
1377
1378 /* First-generation switches have a 4-way set associative TCAM that
1379  * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
1380  * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
1381  * For the placement of a newly learnt FDB entry, the switch selects the bin
1382  * based on a hash function, and the way within that bin incrementally.
1383  */
1384 static int sja1105et_fdb_index(int bin, int way)
1385 {
1386         return bin * SJA1105ET_FDB_BIN_SIZE + way;
1387 }
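/* Worked example: with SJA1105ET_FDB_BIN_SIZE == 4, an entry hashed to
 * bin 37 and stored in way 2 sits at FDB index 37 * 4 + 2 = 150; the
 * 0..1023 index range therefore corresponds to 256 bins of 4 ways each.
 */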
1388
1389 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
1390                                          const u8 *addr, u16 vid,
1391                                          struct sja1105_l2_lookup_entry *match,
1392                                          int *last_unused)
1393 {
1394         int way;
1395
1396         for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
1397                 struct sja1105_l2_lookup_entry l2_lookup = {0};
1398                 int index = sja1105et_fdb_index(bin, way);
1399
1400                 /* Skip unused entries, optionally recording the last one
1401                  * seen in *last_unused
1402                  */
1403                 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1404                                                 index, &l2_lookup)) {
1405                         if (last_unused)
1406                                 *last_unused = way;
1407                         continue;
1408                 }
1409
1410                 if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
1411                     l2_lookup.vlanid == vid) {
1412                         if (match)
1413                                 *match = l2_lookup;
1414                         return way;
1415                 }
1416         }
1417         /* Return an invalid entry index if not found */
1418         return -1;
1419 }
1420
1421 int sja1105et_fdb_add(struct dsa_switch *ds, int port,
1422                       const unsigned char *addr, u16 vid)
1423 {
1424         struct sja1105_l2_lookup_entry l2_lookup = {0};
1425         struct sja1105_private *priv = ds->priv;
1426         struct device *dev = ds->dev;
1427         int last_unused = -1;
1428         int bin, way, rc;
1429
1430         bin = sja1105et_fdb_hash(priv, addr, vid);
1431
1432         way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1433                                             &l2_lookup, &last_unused);
1434         if (way >= 0) {
1435                 /* We have an FDB entry. Is our port in the destination
1436                  * mask? If yes, we need to do nothing. If not, we need
1437                  * to rewrite the entry by adding this port to it.
1438                  */
1439                 if (l2_lookup.destports & BIT(port))
1440                         return 0;
1441                 l2_lookup.destports |= BIT(port);
1442         } else {
1443                 /* We don't have an FDB entry. We construct a new one and
1444                  * try to find a place for it within the FDB table.
1445                  */
1446                 l2_lookup.macaddr = ether_addr_to_u64(addr);
1447                 l2_lookup.destports = BIT(port);
1448                 l2_lookup.vlanid = vid;
1449 
1450                 if (last_unused >= 0) {
1451                         way = last_unused;
1452                 } else {
1453                         int index;
1454 
1455                         /* Bin is full, need to evict somebody. Choose the victim
1456                          * at random. If you get these messages often, you may need
1457                          * to consider changing the distribution function:
1458                          * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
1459                          */
1460                         get_random_bytes(&way, sizeof(way));
1461                         way = (u32)way % SJA1105ET_FDB_BIN_SIZE;
1462                         index = sja1105et_fdb_index(bin, way);
1463                         dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
1464                                  bin, addr, way);
1465                         /* Evict the victim to make room for the new entry */
1466                         sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1467                                                      index, NULL, false);
1468                 }
1469         }
1470         l2_lookup.index = sja1105et_fdb_index(bin, way);
1471
1472         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1473                                           l2_lookup.index, &l2_lookup,
1474                                           true);
1475         if (rc < 0)
1476                 return rc;
1477
1478         return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1479 }
1480
1481 int sja1105et_fdb_del(struct dsa_switch *ds, int port,
1482                       const unsigned char *addr, u16 vid)
1483 {
1484         struct sja1105_l2_lookup_entry l2_lookup = {0};
1485         struct sja1105_private *priv = ds->priv;
1486         int index, bin, way, rc;
1487         bool keep;
1488
1489         bin = sja1105et_fdb_hash(priv, addr, vid);
1490         way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1491                                             &l2_lookup, NULL);
1492         if (way < 0)
1493                 return 0;
1494         index = sja1105et_fdb_index(bin, way);
1495
1496         /* We have an FDB entry. Is our port in the destination mask? If yes,
1497          * we need to remove it. If the resulting port mask becomes empty, we
1498          * need to completely evict the FDB entry.
1499          * Otherwise we just write it back.
1500          */
1501         l2_lookup.destports &= ~BIT(port);
1502
1503         if (l2_lookup.destports)
1504                 keep = true;
1505         else
1506                 keep = false;
1507
1508         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1509                                           index, &l2_lookup, keep);
1510         if (rc < 0)
1511                 return rc;
1512
1513         return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1514 }
1515
1516 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
1517                         const unsigned char *addr, u16 vid)
1518 {
1519         struct sja1105_l2_lookup_entry l2_lookup = {0};
1520         struct sja1105_private *priv = ds->priv;
1521         int rc, i;
1522
1523         /* Search for an existing entry in the FDB table */
1524         l2_lookup.macaddr = ether_addr_to_u64(addr);
1525         l2_lookup.vlanid = vid;
1526         l2_lookup.iotag = SJA1105_S_TAG;
1527         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1528         if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
1529                 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1530                 l2_lookup.mask_iotag = BIT(0);
1531         } else {
1532                 l2_lookup.mask_vlanid = 0;
1533                 l2_lookup.mask_iotag = 0;
1534         }
1535         l2_lookup.destports = BIT(port);
1536
1537         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1538                                          SJA1105_SEARCH, &l2_lookup);
1539         if (rc == 0) {
1540                 /* Found and this port is already in the entry's
1541                  * port mask => job done
1542                  */
1543                 if (l2_lookup.destports & BIT(port))
1544                         return 0;
1545                 /* l2_lookup.index is populated by the switch in case it
1546                  * found something.
1547                  */
1548                 l2_lookup.destports |= BIT(port);
1549                 goto skip_finding_an_index;
1550         }
1551
1552         /* Not found, so try to find an unused spot in the FDB.
1553          * This is slightly inefficient because the strategy is to knock at
1554          * every possible position from 0 to 1023.
1555          */
1556         for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1557                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1558                                                  i, NULL);
1559                 if (rc < 0)
1560                         break;
1561         }
1562         if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
1563                 dev_err(ds->dev, "FDB is full, cannot add entry.\n");
1564                 return -EINVAL;
1565         }
1566         l2_lookup.lockeds = true;
1567         l2_lookup.index = i;
1568
1569 skip_finding_an_index:
1570         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1571                                           l2_lookup.index, &l2_lookup,
1572                                           true);
1573         if (rc < 0)
1574                 return rc;
1575
1576         return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1577 }
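/* Note on the lookup strategy above: the initial dynamic config read is
 * issued with SJA1105_SEARCH so that the switch itself locates an entry
 * matching the populated fields/masks and reports its position through
 * l2_lookup.index. Only when that search fails does the code fall back to
 * linearly probing indices 0..SJA1105_MAX_L2_LOOKUP_COUNT - 1 for a free
 * slot. sja1105pqrs_fdb_del() below relies on the same search mechanism.
 */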
1578
1579 int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
1580                         const unsigned char *addr, u16 vid)
1581 {
1582         struct sja1105_l2_lookup_entry l2_lookup = {0};
1583         struct sja1105_private *priv = ds->priv;
1584         bool keep;
1585         int rc;
1586
1587         l2_lookup.macaddr = ether_addr_to_u64(addr);
1588         l2_lookup.vlanid = vid;
1589         l2_lookup.iotag = SJA1105_S_TAG;
1590         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1591         if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
1592                 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1593                 l2_lookup.mask_iotag = BIT(0);
1594         } else {
1595                 l2_lookup.mask_vlanid = 0;
1596                 l2_lookup.mask_iotag = 0;
1597         }
1598         l2_lookup.destports = BIT(port);
1599
1600         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1601                                          SJA1105_SEARCH, &l2_lookup);
1602         if (rc < 0)
1603                 return 0;
1604
1605         l2_lookup.destports &= ~BIT(port);
1606
1607         /* Decide whether we remove just this port from the FDB entry,
1608          * or if we remove it completely.
1609          */
1610         if (l2_lookup.destports)
1611                 keep = true;
1612         else
1613                 keep = false;
1614
1615         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1616                                           l2_lookup.index, &l2_lookup, keep);
1617         if (rc < 0)
1618                 return rc;
1619
1620         return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1621 }
1622
1623 static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1624                            const unsigned char *addr, u16 vid)
1625 {
1626         struct sja1105_private *priv = ds->priv;
1627
1628         /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
1629          * so the switch still does some VLAN processing internally.
1630          * But Shared VLAN Learning (SVL) is also active, and it will take
1631          * care of autonomous forwarding between the unique pvid's of each
1632          * port.  Here we just make sure that users can't add duplicate FDB
1633          * entries when in this mode - the actual VID doesn't matter except
1634          * for what gets printed in 'bridge fdb show'.  In the case of zero,
1635          * no VID gets printed at all.
1636          */
1637         if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1638                 vid = 0;
1639
1640         return priv->info->fdb_add_cmd(ds, port, addr, vid);
1641 }
1642
1643 static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1644                            const unsigned char *addr, u16 vid)
1645 {
1646         struct sja1105_private *priv = ds->priv;
1647
1648         if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
1649                 vid = 0;
1650
1651         return priv->info->fdb_del_cmd(ds, port, addr, vid);
1652 }
1653
1654 static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1655                             dsa_fdb_dump_cb_t *cb, void *data)
1656 {
1657         struct sja1105_private *priv = ds->priv;
1658         struct device *dev = ds->dev;
1659         int i;
1660
1661         for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1662                 struct sja1105_l2_lookup_entry l2_lookup = {0};
1663                 u8 macaddr[ETH_ALEN];
1664                 int rc;
1665
1666                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1667                                                  i, &l2_lookup);
1668                 /* No fdb entry at i, not an issue */
1669                 if (rc == -ENOENT)
1670                         continue;
1671                 if (rc) {
1672                         dev_err(dev, "Failed to dump FDB: %d\n", rc);
1673                         return rc;
1674                 }
1675
1676                 /* FDB dump callback is per port. This means we have to
1677                  * disregard a valid entry if it's not for this port, even if
1678                  * only to revisit it later. This is inefficient because the
1679                  * 1024-sized FDB table needs to be traversed 4 times through
1680                  * SPI during a 'bridge fdb show' command.
1681                  */
1682                 if (!(l2_lookup.destports & BIT(port)))
1683                         continue;
1684
1685                 /* We need to hide the FDB entry for unknown multicast */
1686                 if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
1687                     l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
1688                         continue;
1689
1690                 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1691
1692                 /* We need to hide the dsa_8021q VLANs from the user. */
1693                 if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
1694                         l2_lookup.vlanid = 0;
1695                 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
1696         }
1697         return 0;
1698 }
1699
1700 static int sja1105_mdb_add(struct dsa_switch *ds, int port,
1701                            const struct switchdev_obj_port_mdb *mdb)
1702 {
1703         return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
1704 }
1705
1706 static int sja1105_mdb_del(struct dsa_switch *ds, int port,
1707                            const struct switchdev_obj_port_mdb *mdb)
1708 {
1709         return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
1710 }
1711
1712 /* Common function for unicast and broadcast flood configuration.
1713  * Flooding is configured between each {ingress, egress} port pair, and since
1714  * the bridge's semantics are those of "egress flooding", we must
1715  * enable flooding towards this port from all ingress ports that are in the
1716  * same forwarding domain.
1717  */
1718 static int sja1105_manage_flood_domains(struct sja1105_private *priv)
1719 {
1720         struct sja1105_l2_forwarding_entry *l2_fwd;
1721         struct dsa_switch *ds = priv->ds;
1722         int from, to, rc;
1723
1724         l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
1725
1726         for (from = 0; from < ds->num_ports; from++) {
1727                 u64 fl_domain = 0, bc_domain = 0;
1728
1729                 for (to = 0; to < priv->ds->num_ports; to++) {
1730                         if (!sja1105_can_forward(l2_fwd, from, to))
1731                                 continue;
1732
1733                         if (priv->ucast_egress_floods & BIT(to))
1734                                 fl_domain |= BIT(to);
1735                         if (priv->bcast_egress_floods & BIT(to))
1736                                 bc_domain |= BIT(to);
1737                 }
1738
1739                 /* Nothing changed, nothing to do */
1740                 if (l2_fwd[from].fl_domain == fl_domain &&
1741                     l2_fwd[from].bc_domain == bc_domain)
1742                         continue;
1743
1744                 l2_fwd[from].fl_domain = fl_domain;
1745                 l2_fwd[from].bc_domain = bc_domain;
1746
1747                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1748                                                   from, &l2_fwd[from], true);
1749                 if (rc < 0)
1750                         return rc;
1751         }
1752
1753         return 0;
1754 }
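/* Worked example (illustrative masks only): if sja1105_can_forward() says
 * that port 0 may forward to ports 1 and 2, and unknown-unicast flooding is
 * enabled towards port 2 but not port 1 (priv->ucast_egress_floods == BIT(2)),
 * then l2_fwd[0].fl_domain ends up as BIT(2): unknown unicast received on
 * port 0 is flooded towards port 2 only. bc_domain is derived the same way
 * from priv->bcast_egress_floods.
 */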
1755
1756 static int sja1105_bridge_member(struct dsa_switch *ds, int port,
1757                                  struct net_device *br, bool member)
1758 {
1759         struct sja1105_l2_forwarding_entry *l2_fwd;
1760         struct sja1105_private *priv = ds->priv;
1761         int i, rc;
1762
1763         l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
1764
1765         for (i = 0; i < ds->num_ports; i++) {
1766                 /* Add this port to the forwarding matrix of the
1767                  * other ports in the same bridge, and vice versa.
1768                  */
1769                 if (!dsa_is_user_port(ds, i))
1770                         continue;
1771                 /* For the ports already under the bridge, only one thing needs
1772                  * to be done, and that is to add this port to their
1773                  * reachability domain. So we can perform the SPI write for
1774                  * them immediately. However, for this port itself (the one
1775                  * that is new to the bridge), we need to add all other ports
1776                  * to its reachability domain. So we do that incrementally in
1777                  * this loop, and perform the SPI write only at the end, once
1778                  * the domain contains all other bridge ports.
1779                  */
1780                 if (i == port)
1781                         continue;
1782                 if (dsa_to_port(ds, i)->bridge_dev != br)
1783                         continue;
1784                 sja1105_port_allow_traffic(l2_fwd, i, port, member);
1785                 sja1105_port_allow_traffic(l2_fwd, port, i, member);
1786
1787                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1788                                                   i, &l2_fwd[i], true);
1789                 if (rc < 0)
1790                         return rc;
1791         }
1792
1793         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1794                                           port, &l2_fwd[port], true);
1795         if (rc)
1796                 return rc;
1797
1798         return sja1105_manage_flood_domains(priv);
1799 }
1800
1801 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
1802                                          u8 state)
1803 {
1804         struct sja1105_private *priv = ds->priv;
1805         struct sja1105_mac_config_entry *mac;
1806
1807         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1808
1809         switch (state) {
1810         case BR_STATE_DISABLED:
1811         case BR_STATE_BLOCKING:
1812                 /* From UM10944 description of DRPDTAG (why put this there?):
1813                  * "Management traffic flows to the port regardless of the state
1814                  * of the INGRESS flag". So BPDUs are still allowed to pass.
1815                  * At the moment no difference between DISABLED and BLOCKING.
1816                  */
1817                 mac[port].ingress   = false;
1818                 mac[port].egress    = false;
1819                 mac[port].dyn_learn = false;
1820                 break;
1821         case BR_STATE_LISTENING:
1822                 mac[port].ingress   = true;
1823                 mac[port].egress    = false;
1824                 mac[port].dyn_learn = false;
1825                 break;
1826         case BR_STATE_LEARNING:
1827                 mac[port].ingress   = true;
1828                 mac[port].egress    = false;
1829                 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
1830                 break;
1831         case BR_STATE_FORWARDING:
1832                 mac[port].ingress   = true;
1833                 mac[port].egress    = true;
1834                 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
1835                 break;
1836         default:
1837                 dev_err(ds->dev, "invalid STP state: %d\n", state);
1838                 return;
1839         }
1840
1841         sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1842                                      &mac[port], true);
1843 }
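/* Summary of the mapping applied above:
 *
 *   STP state           ingress  egress  dyn_learn
 *   DISABLED/BLOCKING   no       no      no
 *   LISTENING           yes      no      no
 *   LEARNING            yes      no      priv->learn_ena & BIT(port)
 *   FORWARDING          yes      yes     priv->learn_ena & BIT(port)
 */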
1844
1845 static int sja1105_bridge_join(struct dsa_switch *ds, int port,
1846                                struct net_device *br)
1847 {
1848         return sja1105_bridge_member(ds, port, br, true);
1849 }
1850
1851 static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
1852                                  struct net_device *br)
1853 {
1854         sja1105_bridge_member(ds, port, br, false);
1855 }
1856
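/* One kilobit per second is 1000 bits/s, i.e. 125 bytes/s, hence the
 * conversion factor below for translating the tc-cbs slopes (given in
 * kbit/s) into the bytes/s that the hardware expects.
 */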
1857 #define BYTES_PER_KBIT (1000LL / 8)
1858
1859 static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
1860 {
1861         int i;
1862
1863         for (i = 0; i < priv->info->num_cbs_shapers; i++)
1864                 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
1865                         return i;
1866
1867         return -1;
1868 }
1869
1870 static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
1871                                      int prio)
1872 {
1873         int i;
1874
1875         for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1876                 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1877
1878                 if (cbs->port == port && cbs->prio == prio) {
1879                         memset(cbs, 0, sizeof(*cbs));
1880                         return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
1881                                                             i, cbs, true);
1882                 }
1883         }
1884
1885         return 0;
1886 }
1887
1888 static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
1889                                 struct tc_cbs_qopt_offload *offload)
1890 {
1891         struct sja1105_private *priv = ds->priv;
1892         struct sja1105_cbs_entry *cbs;
1893         int index;
1894
1895         if (!offload->enable)
1896                 return sja1105_delete_cbs_shaper(priv, port, offload->queue);
1897
1898         index = sja1105_find_unused_cbs_shaper(priv);
1899         if (index < 0)
1900                 return -ENOSPC;
1901
1902         cbs = &priv->cbs[index];
1903         cbs->port = port;
1904         cbs->prio = offload->queue;
1905         /* locredit and sendslope are negative by definition. In hardware,
1906          * positive values must be provided, and the negative sign is implicit.
1907          */
1908         cbs->credit_hi = offload->hicredit;
1909         cbs->credit_lo = abs(offload->locredit);
1910         /* User space is in kbits/sec, hardware in bytes/sec */
1911         cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
1912         cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
1913         /* Convert the negative values from 64-bit 2's complement
1914          * to 32-bit 2's complement (for the case of 0x80000000 whose
1915          * negative is still negative).
1916          */
1917         cbs->credit_lo &= GENMASK_ULL(31, 0);
1918         cbs->send_slope &= GENMASK_ULL(31, 0);
1919
1920         return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
1921                                             true);
1922 }
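/* Worked example for the conversions above (numbers are illustrative only):
 * a tc-cbs offload with idleslope = 20000 kbit/s and sendslope = -80000
 * kbit/s is programmed as
 *
 *   idle_slope = 20000 * 125 = 2500000 bytes/s
 *   send_slope = |-80000 * 125| = 10000000 bytes/s
 *
 * while locredit, which user space passes as a negative number, is likewise
 * programmed as its absolute value.
 */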
1923
1924 static int sja1105_reload_cbs(struct sja1105_private *priv)
1925 {
1926         int rc = 0, i;
1927
1928         for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1929                 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1930
1931                 if (!cbs->idle_slope && !cbs->send_slope)
1932                         continue;
1933
1934                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
1935                                                   true);
1936                 if (rc)
1937                         break;
1938         }
1939
1940         return rc;
1941 }
1942
1943 static const char * const sja1105_reset_reasons[] = {
1944         [SJA1105_VLAN_FILTERING] = "VLAN filtering",
1945         [SJA1105_RX_HWTSTAMPING] = "RX timestamping",
1946         [SJA1105_AGEING_TIME] = "Ageing time",
1947         [SJA1105_SCHEDULING] = "Time-aware scheduling",
1948         [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
1949         [SJA1105_VIRTUAL_LINKS] = "Virtual links",
1950 };
1951
1952 /* For situations where we need to change a setting at runtime that is only
1953  * available through the static configuration, resetting the switch in order
1954  * to upload the new static config is unavoidable. Back up the settings we
1955  * modify at runtime (currently only MAC) and restore them after uploading,
1956  * such that this operation is relatively seamless.
1957  */
1958 int sja1105_static_config_reload(struct sja1105_private *priv,
1959                                  enum sja1105_reset_reason reason)
1960 {
1961         struct ptp_system_timestamp ptp_sts_before;
1962         struct ptp_system_timestamp ptp_sts_after;
1963         int speed_mbps[SJA1105_MAX_NUM_PORTS];
1964         u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0};
1965         struct sja1105_mac_config_entry *mac;
1966         struct dsa_switch *ds = priv->ds;
1967         s64 t1, t2, t3, t4;
1968         s64 t12, t34;
1969         int rc, i;
1970         s64 now;
1971
1972         mutex_lock(&priv->mgmt_lock);
1973
1974         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1975
1976         /* Back up the dynamic link speed changed by sja1105_adjust_port_config
1977          * and temporarily set the static config back to SJA1105_SPEED_AUTO -
1978          * that is what the switch wants to see there in order to allow us to
1979          * change the speed through the dynamic interface later.
1980          */
1981         for (i = 0; i < ds->num_ports; i++) {
1982                 speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
1983                                                               mac[i].speed);
1984                 mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
1985
1986                 if (priv->phy_mode[i] == PHY_INTERFACE_MODE_SGMII)
1987                         bmcr[i] = sja1105_sgmii_read(priv, i,
1988                                                      MDIO_MMD_VEND2,
1989                                                      MDIO_CTRL1);
1990         }
1991
1992         /* No PTP operations can run right now */
1993         mutex_lock(&priv->ptp_data.lock);
1994
1995         rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
1996         if (rc < 0)
1997                 goto out_unlock_ptp;
1998
1999         /* Reset switch and send updated static configuration */
2000         rc = sja1105_static_config_upload(priv);
2001         if (rc < 0)
2002                 goto out_unlock_ptp;
2003
2004         rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
2005         if (rc < 0)
2006                 goto out_unlock_ptp;
2007
2008         t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
2009         t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
2010         t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
2011         t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
2012         /* Mid point, corresponds to pre-reset PTPCLKVAL */
2013         t12 = t1 + (t2 - t1) / 2;
2014         /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
2015         t34 = t3 + (t4 - t3) / 2;
2016         /* Advance PTPCLKVAL by the time it took since its readout */
2017         now += (t34 - t12);
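        /* In other words, the restored clock is the pre-reset PTPCLKVAL plus
         * the (t34 - t12) nanoseconds that elapsed, per the system clock,
         * between the two mid points, approximately compensating for the
         * time spent in reset.
         */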
2018
2019         __sja1105_ptp_adjtime(ds, now);
2020
2021 out_unlock_ptp:
2022         mutex_unlock(&priv->ptp_data.lock);
2023
2024         dev_info(priv->ds->dev,
2025                  "Reset switch and programmed static config. Reason: %s\n",
2026                  sja1105_reset_reasons[reason]);
2027
2028         /* Configure the CGU (PLLs) for MII and RMII PHYs.
2029          * For these interfaces there is no dynamic configuration
2030          * needed, since the PLLs have the same settings at all speeds.
2031          */
2032         rc = priv->info->clocking_setup(priv);
2033         if (rc < 0)
2034                 goto out;
2035
2036         for (i = 0; i < ds->num_ports; i++) {
2037                 bool an_enabled;
2038
2039                 rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
2040                 if (rc < 0)
2041                         goto out;
2042
2043                 if (priv->phy_mode[i] != PHY_INTERFACE_MODE_SGMII)
2044                         continue;
2045
2046                 an_enabled = !!(bmcr[i] & BMCR_ANENABLE);
2047
2048                 sja1105_sgmii_pcs_config(priv, i, an_enabled, false);
2049
2050                 if (!an_enabled) {
2051                         int speed = SPEED_UNKNOWN;
2052
2053                         if (bmcr[i] & BMCR_SPEED1000)
2054                                 speed = SPEED_1000;
2055                         else if (bmcr[i] & BMCR_SPEED100)
2056                                 speed = SPEED_100;
2057                         else
2058                                 speed = SPEED_10;
2059
2060                         sja1105_sgmii_pcs_force_speed(priv, i, speed);
2061                 }
2062         }
2063
2064         rc = sja1105_reload_cbs(priv);
2065         if (rc < 0)
2066                 goto out;
2067 out:
2068         mutex_unlock(&priv->mgmt_lock);
2069
2070         return rc;
2071 }
2072
2073 static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
2074 {
2075         struct sja1105_mac_config_entry *mac;
2076
2077         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
2078
2079         mac[port].vlanid = pvid;
2080
2081         return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
2082                                            &mac[port], true);
2083 }
2084
2085 static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
2086                                          int tree_index, int sw_index,
2087                                          int other_port, struct net_device *br)
2088 {
2089         struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
2090         struct sja1105_private *other_priv = other_ds->priv;
2091         struct sja1105_private *priv = ds->priv;
2092         int port, rc;
2093
2094         if (other_ds->ops != &sja1105_switch_ops)
2095                 return 0;
2096
2097         for (port = 0; port < ds->num_ports; port++) {
2098                 if (!dsa_is_user_port(ds, port))
2099                         continue;
2100                 if (dsa_to_port(ds, port)->bridge_dev != br)
2101                         continue;
2102
2103                 rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
2104                                                      port,
2105                                                      other_priv->dsa_8021q_ctx,
2106                                                      other_port);
2107                 if (rc)
2108                         return rc;
2109
2110                 rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
2111                                                      other_port,
2112                                                      priv->dsa_8021q_ctx,
2113                                                      port);
2114                 if (rc)
2115                         return rc;
2116         }
2117
2118         return 0;
2119 }
2120
2121 static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
2122                                            int tree_index, int sw_index,
2123                                            int other_port,
2124                                            struct net_device *br)
2125 {
2126         struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
2127         struct sja1105_private *other_priv = other_ds->priv;
2128         struct sja1105_private *priv = ds->priv;
2129         int port;
2130
2131         if (other_ds->ops != &sja1105_switch_ops)
2132                 return;
2133
2134         for (port = 0; port < ds->num_ports; port++) {
2135                 if (!dsa_is_user_port(ds, port))
2136                         continue;
2137                 if (dsa_to_port(ds, port)->bridge_dev != br)
2138                         continue;
2139
2140                 dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
2141                                                  other_priv->dsa_8021q_ctx,
2142                                                  other_port);
2143
2144                 dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
2145                                                  other_port,
2146                                                  priv->dsa_8021q_ctx, port);
2147         }
2148 }
2149
2150 static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
2151 {
2152         struct sja1105_private *priv = ds->priv;
2153         int rc;
2154
2155         rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
2156         if (rc)
2157                 return rc;
2158
2159         dev_info(ds->dev, "%s switch tagging\n",
2160                  enabled ? "Enabled" : "Disabled");
2161         return 0;
2162 }
2163
2164 static enum dsa_tag_protocol
2165 sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
2166                          enum dsa_tag_protocol mp)
2167 {
2168         return DSA_TAG_PROTO_SJA1105;
2169 }
2170
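/* Sub-VLAN map convention used below: each port has DSA_8021Q_N_SUBVLAN
 * slots, and a slot holds either the bridge VLAN mapped to that sub-VLAN
 * code or VLAN_N_VID when the slot is free. Sub-VLAN 0 is used for the
 * port's pvid, which is why sja1105_find_free_subvlan() only searches from
 * 1 upwards for non-pvid VLANs.
 */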
2171 static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
2172 {
2173         int subvlan;
2174
2175         if (pvid)
2176                 return 0;
2177
2178         for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2179                 if (subvlan_map[subvlan] == VLAN_N_VID)
2180                         return subvlan;
2181
2182         return -1;
2183 }
2184
2185 static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
2186 {
2187         int subvlan;
2188
2189         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2190                 if (subvlan_map[subvlan] == vid)
2191                         return subvlan;
2192
2193         return -1;
2194 }
2195
2196 static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
2197                                           int port, u16 vid)
2198 {
2199         struct sja1105_port *sp = &priv->ports[port];
2200
2201         return sja1105_find_subvlan(sp->subvlan_map, vid);
2202 }
2203
2204 static void sja1105_init_subvlan_map(u16 *subvlan_map)
2205 {
2206         int subvlan;
2207
2208         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2209                 subvlan_map[subvlan] = VLAN_N_VID;
2210 }
2211
2212 static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
2213                                        u16 *subvlan_map)
2214 {
2215         struct sja1105_port *sp = &priv->ports[port];
2216         int subvlan;
2217
2218         for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2219                 sp->subvlan_map[subvlan] = subvlan_map[subvlan];
2220 }
2221
2222 static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
2223 {
2224         struct sja1105_vlan_lookup_entry *vlan;
2225         int count, i;
2226
2227         vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
2228         count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
2229
2230         for (i = 0; i < count; i++)
2231                 if (vlan[i].vlanid == vid)
2232                         return i;
2233
2234         /* Return an invalid entry index if not found */
2235         return -1;
2236 }
2237
2238 static int
2239 sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
2240                              int count, int from_port, u16 from_vid,
2241                              u16 to_vid)
2242 {
2243         int i;
2244
2245         for (i = 0; i < count; i++)
2246                 if (retagging[i].ing_port == BIT(from_port) &&
2247                     retagging[i].vlan_ing == from_vid &&
2248                     retagging[i].vlan_egr == to_vid)
2249                         return i;
2250
2251         /* Return an invalid entry index if not found */
2252         return -1;
2253 }
2254
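/* sja1105_commit_vlans() below reconciles the newly built VLAN state with
 * the hardware in two passes: first the VLAN Lookup table (delete VLANs that
 * disappeared, update the ones that changed via the dynamic config
 * interface, then rebuild the static table from new_vlan), and then the
 * VLAN Retagging table (invalidate the old rules and install new_retagging
 * both statically and dynamically).
 */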
2255 static int sja1105_commit_vlans(struct sja1105_private *priv,
2256                                 struct sja1105_vlan_lookup_entry *new_vlan,
2257                                 struct sja1105_retagging_entry *new_retagging,
2258                                 int num_retagging)
2259 {
2260         struct sja1105_retagging_entry *retagging;
2261         struct sja1105_vlan_lookup_entry *vlan;
2262         struct sja1105_table *table;
2263         int num_vlans = 0;
2264         int rc, i, k = 0;
2265
2266         /* VLAN table */
2267         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2268         vlan = table->entries;
2269
2270         for (i = 0; i < VLAN_N_VID; i++) {
2271                 int match = sja1105_is_vlan_configured(priv, i);
2272
2273                 if (new_vlan[i].vlanid != VLAN_N_VID)
2274                         num_vlans++;
2275
2276                 if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
2277                         /* Was there before, no longer is. Delete */
2278                         dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
2279                         rc = sja1105_dynamic_config_write(priv,
2280                                                           BLK_IDX_VLAN_LOOKUP,
2281                                                           i, &vlan[match], false);
2282                         if (rc < 0)
2283                                 return rc;
2284                 } else if (new_vlan[i].vlanid != VLAN_N_VID) {
2285                         /* Nothing changed, don't do anything */
2286                         if (match >= 0 &&
2287                             vlan[match].vlanid == new_vlan[i].vlanid &&
2288                             vlan[match].tag_port == new_vlan[i].tag_port &&
2289                             vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
2290                             vlan[match].vmemb_port == new_vlan[i].vmemb_port)
2291                                 continue;
2292                         /* Update entry */
2293                         dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
2294                         rc = sja1105_dynamic_config_write(priv,
2295                                                           BLK_IDX_VLAN_LOOKUP,
2296                                                           i, &new_vlan[i],
2297                                                           true);
2298                         if (rc < 0)
2299                                 return rc;
2300                 }
2301         }
2302
2303         if (table->entry_count)
2304                 kfree(table->entries);
2305
2306         table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
2307                                  GFP_KERNEL);
2308         if (!table->entries)
2309                 return -ENOMEM;
2310
2311         table->entry_count = num_vlans;
2312         vlan = table->entries;
2313
2314         for (i = 0; i < VLAN_N_VID; i++) {
2315                 if (new_vlan[i].vlanid == VLAN_N_VID)
2316                         continue;
2317                 vlan[k++] = new_vlan[i];
2318         }
2319
2320         /* VLAN Retagging Table */
2321         table = &priv->static_config.tables[BLK_IDX_RETAGGING];
2322         retagging = table->entries;
2323
2324         for (i = 0; i < table->entry_count; i++) {
2325                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2326                                                   i, &retagging[i], false);
2327                 if (rc)
2328                         return rc;
2329         }
2330
2331         if (table->entry_count)
2332                 kfree(table->entries);
2333
2334         table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
2335                                  GFP_KERNEL);
2336         if (!table->entries)
2337                 return -ENOMEM;
2338
2339         table->entry_count = num_retagging;
2340         retagging = table->entries;
2341
2342         for (i = 0; i < num_retagging; i++) {
2343                 retagging[i] = new_retagging[i];
2344
2345                 /* Update entry */
2346                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2347                                                   i, &retagging[i], true);
2348                 if (rc < 0)
2349                         return rc;
2350         }
2351
2352         return 0;
2353 }
2354
2355 struct sja1105_crosschip_vlan {
2356         struct list_head list;
2357         u16 vid;
2358         bool untagged;
2359         int port;
2360         int other_port;
2361         struct dsa_8021q_context *other_ctx;
2362 };
2363
2364 struct sja1105_crosschip_switch {
2365         struct list_head list;
2366         struct dsa_8021q_context *other_ctx;
2367 };
2368
2369 static int sja1105_commit_pvid(struct sja1105_private *priv)
2370 {
2371         struct sja1105_bridge_vlan *v;
2372         struct list_head *vlan_list;
2373         int rc = 0;
2374
2375         if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2376                 vlan_list = &priv->bridge_vlans;
2377         else
2378                 vlan_list = &priv->dsa_8021q_vlans;
2379
2380         list_for_each_entry(v, vlan_list, list) {
2381                 if (v->pvid) {
2382                         rc = sja1105_pvid_apply(priv, v->port, v->vid);
2383                         if (rc)
2384                                 break;
2385                 }
2386         }
2387
2388         return rc;
2389 }
2390
2391 static int
2392 sja1105_build_bridge_vlans(struct sja1105_private *priv,
2393                            struct sja1105_vlan_lookup_entry *new_vlan)
2394 {
2395         struct sja1105_bridge_vlan *v;
2396
2397         if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
2398                 return 0;
2399
2400         list_for_each_entry(v, &priv->bridge_vlans, list) {
2401                 int match = v->vid;
2402
2403                 new_vlan[match].vlanid = v->vid;
2404                 new_vlan[match].vmemb_port |= BIT(v->port);
2405                 new_vlan[match].vlan_bc |= BIT(v->port);
2406                 if (!v->untagged)
2407                         new_vlan[match].tag_port |= BIT(v->port);
2408                 new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
2409         }
2410
2411         return 0;
2412 }
2413
2414 static int
2415 sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
2416                               struct sja1105_vlan_lookup_entry *new_vlan)
2417 {
2418         struct sja1105_bridge_vlan *v;
2419
2420         if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2421                 return 0;
2422
2423         list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
2424                 int match = v->vid;
2425
2426                 new_vlan[match].vlanid = v->vid;
2427                 new_vlan[match].vmemb_port |= BIT(v->port);
2428                 new_vlan[match].vlan_bc |= BIT(v->port);
2429                 if (!v->untagged)
2430                         new_vlan[match].tag_port |= BIT(v->port);
2431                 new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
2432         }
2433
2434         return 0;
2435 }
2436
2437 static int sja1105_build_subvlans(struct sja1105_private *priv,
2438                                   u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
2439                                   struct sja1105_vlan_lookup_entry *new_vlan,
2440                                   struct sja1105_retagging_entry *new_retagging,
2441                                   int *num_retagging)
2442 {
2443         struct sja1105_bridge_vlan *v;
2444         int k = *num_retagging;
2445
2446         if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2447                 return 0;
2448
2449         list_for_each_entry(v, &priv->bridge_vlans, list) {
2450                 int upstream = dsa_upstream_port(priv->ds, v->port);
2451                 int match, subvlan;
2452                 u16 rx_vid;
2453
2454                 /* Only sub-VLANs on user ports need to be applied.
2455                  * Bridge VLANs also include VLANs added automatically
2456                  * by DSA on the CPU port.
2457                  */
2458                 if (!dsa_is_user_port(priv->ds, v->port))
2459                         continue;
2460
2461                 subvlan = sja1105_find_subvlan(subvlan_map[v->port],
2462                                                v->vid);
2463                 if (subvlan < 0) {
2464                         subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
2465                                                             v->pvid);
2466                         if (subvlan < 0) {
2467                                 dev_err(priv->ds->dev, "No more free subvlans\n");
2468                                 return -ENOSPC;
2469                         }
2470                 }
2471
2472                 rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
2473
2474                 /* @v->vid on @v->port needs to be retagged to @rx_vid
2475                  * on @upstream. Assume @v->vid on @v->port and on
2476                  * @upstream was already configured by the previous
2477                  * iteration over bridge_vlans.
2478                  */
2479                 match = rx_vid;
2480                 new_vlan[match].vlanid = rx_vid;
2481                 new_vlan[match].vmemb_port |= BIT(v->port);
2482                 new_vlan[match].vmemb_port |= BIT(upstream);
2483                 new_vlan[match].vlan_bc |= BIT(v->port);
2484                 new_vlan[match].vlan_bc |= BIT(upstream);
2485                 /* The "untagged" flag is set the same as for the
2486                  * original VLAN
2487                  */
2488                 if (!v->untagged)
2489                         new_vlan[match].tag_port |= BIT(v->port);
2490                 /* But it's always tagged towards the CPU */
2491                 new_vlan[match].tag_port |= BIT(upstream);
2492                 new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
2493
2494                 /* The Retagging Table generates packet *clones* with
2495                  * the new VLAN. This is a very odd hardware quirk
2496                  * which we need to suppress by dropping the original
2497                  * packet.
2498                  * Deny egress of the original VLAN towards the CPU
2499                  * port. This will force the switch to drop it, and
2500                  * we'll see only the retagged packets.
2501                  */
2502                 match = v->vid;
2503                 new_vlan[match].vlan_bc &= ~BIT(upstream);
2504
2505                 /* And the retagging itself (check for a free rule first) */
2506                 if (k == SJA1105_MAX_RETAGGING_COUNT) {
2507                         dev_err(priv->ds->dev, "No more retagging rules\n");
2508                         return -ENOSPC;
2509                 }
2510                 new_retagging[k].vlan_ing = v->vid;
2511                 new_retagging[k].vlan_egr = rx_vid;
2512                 new_retagging[k].ing_port = BIT(v->port);
2513                 new_retagging[k++].egr_port = BIT(upstream);
2514
2515                 subvlan_map[v->port][subvlan] = v->vid;
2516         }
2517
2518         *num_retagging = k;
2519
2520         return 0;
2521 }
2522
2523 /* Sadly, in crosschip scenarios where the CPU port is also the link to another
2524  * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
2525  * the CPU port of neighbour switches.
2526  */
2527 static int
2528 sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
2529                                  struct sja1105_vlan_lookup_entry *new_vlan,
2530                                  struct sja1105_retagging_entry *new_retagging,
2531                                  int *num_retagging)
2532 {
2533         struct sja1105_crosschip_vlan *tmp, *pos;
2534         struct dsa_8021q_crosschip_link *c;
2535         struct sja1105_bridge_vlan *v, *w;
2536         struct list_head crosschip_vlans;
2537         int k = *num_retagging;
2538         int rc = 0;
2539
2540         if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2541                 return 0;
2542
2543         INIT_LIST_HEAD(&crosschip_vlans);
2544
2545         list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
2546                 struct sja1105_private *other_priv = c->other_ctx->ds->priv;
2547
2548                 if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2549                         continue;
2550
2551                 /* Crosschip links are also added to the CPU ports.
2552                  * Ignore those.
2553                  */
2554                 if (!dsa_is_user_port(priv->ds, c->port))
2555                         continue;
2556                 if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
2557                         continue;
2558
2559                 /* Search for VLANs on the remote port */
2560                 list_for_each_entry(v, &other_priv->bridge_vlans, list) {
2561                         bool already_added = false;
2562                         bool we_have_it = false;
2563
2564                         if (v->port != c->other_port)
2565                                 continue;
2566
2567                         /* If @v is a pvid on @other_ds, it does not need
2568                          * re-retagging, because its SVL field is 0 and we
2569                          * already allow that, via the dsa_8021q crosschip
2570                          * links.
2571                          */
2572                         if (v->pvid)
2573                                 continue;
2574
2575                         /* Search for the VLAN on our local port */
2576                         list_for_each_entry(w, &priv->bridge_vlans, list) {
2577                                 if (w->port == c->port && w->vid == v->vid) {
2578                                         we_have_it = true;
2579                                         break;
2580                                 }
2581                         }
2582
2583                         if (!we_have_it)
2584                                 continue;
2585
2586                         list_for_each_entry(tmp, &crosschip_vlans, list) {
2587                                 if (tmp->vid == v->vid &&
2588                                     tmp->untagged == v->untagged &&
2589                                     tmp->port == c->port &&
2590                                     tmp->other_port == v->port &&
2591                                     tmp->other_ctx == c->other_ctx) {
2592                                         already_added = true;
2593                                         break;
2594                                 }
2595                         }
2596
2597                         if (already_added)
2598                                 continue;
2599
2600                         tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2601                         if (!tmp) {
2602                                 dev_err(priv->ds->dev, "Failed to allocate memory\n");
2603                                 rc = -ENOMEM;
2604                                 goto out;
2605                         }
2606                         tmp->vid = v->vid;
2607                         tmp->port = c->port;
2608                         tmp->other_port = v->port;
2609                         tmp->other_ctx = c->other_ctx;
2610                         tmp->untagged = v->untagged;
2611                         list_add(&tmp->list, &crosschip_vlans);
2612                 }
2613         }
2614
2615         list_for_each_entry(tmp, &crosschip_vlans, list) {
2616                 struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
2617                 int upstream = dsa_upstream_port(priv->ds, tmp->port);
2618                 int match, subvlan;
2619                 u16 rx_vid;
2620
2621                 subvlan = sja1105_find_committed_subvlan(other_priv,
2622                                                          tmp->other_port,
2623                                                          tmp->vid);
2624                 /* If this happens, it's a bug. The neighbour switch does not
2625                  * have a subvlan for tmp->vid on tmp->other_port, but it
2626                  * should, since we already checked for its vlan_state.
2627                  */
2628                 if (WARN_ON(subvlan < 0)) {
2629                         rc = -EINVAL;
2630                         goto out;
2631                 }
2632
2633                 rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
2634                                                   tmp->other_port,
2635                                                   subvlan);
2636
2637                 /* The @rx_vid retagged from @tmp->vid on
2638                  * {@tmp->other_ds, @tmp->other_port} needs to be
2639                  * re-retagged to @tmp->vid on the way back to us.
2640                  *
2641                  * Assume the original @tmp->vid is already configured
2642                  * on this local switch, otherwise we wouldn't be
2643                  * retagging its subvlan on the other switch in the
2644                  * first place. We just need to add a reverse retagging
2645                  * rule for @rx_vid and install @rx_vid on our ports.
2646                  */
2647                 match = rx_vid;
2648                 new_vlan[match].vlanid = rx_vid;
2649                 new_vlan[match].vmemb_port |= BIT(tmp->port);
2650                 new_vlan[match].vmemb_port |= BIT(upstream);
2651                 /* The "untagged" flag is set the same as for the
2652                  * original VLAN. And towards the CPU, it doesn't
2653                  * really matter, because @rx_vid will only receive
2654                  * traffic on that port. For consistency with other dsa_8021q
2655                  * VLANs, we'll keep the CPU port tagged.
2656                  */
2657                 if (!tmp->untagged)
2658                         new_vlan[match].tag_port |= BIT(tmp->port);
2659                 new_vlan[match].tag_port |= BIT(upstream);
2660                 new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
2661                 /* Deny egress of @rx_vid towards our front-panel port.
2662                  * This will force the switch to drop it, and we'll see
2663                  * only the re-retagged packets (having the original,
2664                  * pre-initial-retagging, VLAN @tmp->vid).
2665                  */
2666                 new_vlan[match].vlan_bc &= ~BIT(tmp->port);
2667
2668                 /* On reverse retagging, the same ingress VLAN goes to multiple
2669                  * ports. So we have an opportunity to create composite rules
2670                  * to not waste the limited space in the retagging table.
2671                  */
2672                 k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
2673                                                  upstream, rx_vid, tmp->vid);
2674                 if (k < 0) {
2675                         if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
2676                                 dev_err(priv->ds->dev, "No more retagging rules\n");
2677                                 rc = -ENOSPC;
2678                                 goto out;
2679                         }
2680                         k = (*num_retagging)++;
2681                 }
2682                 /* And the retagging itself */
2683                 new_retagging[k].vlan_ing = rx_vid;
2684                 new_retagging[k].vlan_egr = tmp->vid;
2685                 new_retagging[k].ing_port = BIT(upstream);
2686                 new_retagging[k].egr_port |= BIT(tmp->port);
2687         }
2688
2689 out:
2690         list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
2691                 list_del(&tmp->list);
2692                 kfree(tmp);
2693         }
2694
2695         return rc;
2696 }
2697
2698 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
2699
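/* Ask each switch that we have a dsa_8021q crosschip link with to rebuild its
 * own VLAN table (with notify=false so it doesn't notify us back), so that
 * its crosschip VLANs stay in sync with the state we just committed locally.
 */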
2700 static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
2701 {
2702         struct sja1105_crosschip_switch *s, *pos;
2703         struct list_head crosschip_switches;
2704         struct dsa_8021q_crosschip_link *c;
2705         int rc = 0;
2706
2707         INIT_LIST_HEAD(&crosschip_switches);
2708
2709         list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
2710                 bool already_added = false;
2711
2712                 list_for_each_entry(s, &crosschip_switches, list) {
2713                         if (s->other_ctx == c->other_ctx) {
2714                                 already_added = true;
2715                                 break;
2716                         }
2717                 }
2718
2719                 if (already_added)
2720                         continue;
2721
2722                 s = kzalloc(sizeof(*s), GFP_KERNEL);
2723                 if (!s) {
2724                         dev_err(priv->ds->dev, "Failed to allocate memory\n");
2725                         rc = -ENOMEM;
2726                         goto out;
2727                 }
2728                 s->other_ctx = c->other_ctx;
2729                 list_add(&s->list, &crosschip_switches);
2730         }
2731
2732         list_for_each_entry(s, &crosschip_switches, list) {
2733                 struct sja1105_private *other_priv = s->other_ctx->ds->priv;
2734
2735                 rc = sja1105_build_vlan_table(other_priv, false);
2736                 if (rc)
2737                         goto out;
2738         }
2739
2740 out:
2741         list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
2742                 list_del(&s->list);
2743                 kfree(s);
2744         }
2745
2746         return rc;
2747 }
2748
2749 static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
2750 {
2751         u16 subvlan_map[SJA1105_MAX_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
2752         struct sja1105_retagging_entry *new_retagging;
2753         struct sja1105_vlan_lookup_entry *new_vlan;
2754         struct sja1105_table *table;
2755         int i, num_retagging = 0;
2756         int rc;
2757
2758         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2759         new_vlan = kcalloc(VLAN_N_VID,
2760                            table->ops->unpacked_entry_size, GFP_KERNEL);
2761         if (!new_vlan)
2762                 return -ENOMEM;
2763
2764         table = &priv->static_config.tables[BLK_IDX_RETAGGING];
2765         new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
2766                                 table->ops->unpacked_entry_size, GFP_KERNEL);
2767         if (!new_retagging) {
2768                 kfree(new_vlan);
2769                 return -ENOMEM;
2770         }
2771
2772         for (i = 0; i < VLAN_N_VID; i++)
2773                 new_vlan[i].vlanid = VLAN_N_VID;
2774
2775         for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
2776                 new_retagging[i].vlan_ing = VLAN_N_VID;
2777
2778         for (i = 0; i < priv->ds->num_ports; i++)
2779                 sja1105_init_subvlan_map(subvlan_map[i]);
2780
2781         /* Bridge VLANs */
2782         rc = sja1105_build_bridge_vlans(priv, new_vlan);
2783         if (rc)
2784                 goto out;
2785
2786         /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
2787          * - RX VLANs
2788          * - TX VLANs
2789          * - Crosschip links
2790          */
2791         rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
2792         if (rc)
2793                 goto out;
2794
2795         /* Private VLANs necessary for dsa_8021q operation, which we need to
2796          * determine on our own:
2797          * - Sub-VLANs
2798          * - Sub-VLANs of crosschip switches
2799          */
2800         rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
2801                                     &num_retagging);
2802         if (rc)
2803                 goto out;
2804
2805         rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
2806                                               &num_retagging);
2807         if (rc)
2808                 goto out;
2809
2810         rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
2811         if (rc)
2812                 goto out;
2813
2814         rc = sja1105_commit_pvid(priv);
2815         if (rc)
2816                 goto out;
2817
2818         for (i = 0; i < priv->ds->num_ports; i++)
2819                 sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
2820
2821         if (notify) {
2822                 rc = sja1105_notify_crosschip_switches(priv);
2823                 if (rc)
2824                         goto out;
2825         }
2826
2827 out:
2828         kfree(new_vlan);
2829         kfree(new_retagging);
2830
2831         return rc;
2832 }
2833
2834 /* The TPID setting belongs to the General Parameters table,
2835  * which can only be partially reconfigured at runtime (and not the TPID).
2836  * So a switch reset is required.
2837  */
2838 int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
2839                            struct netlink_ext_ack *extack)
2840 {
2841         struct sja1105_l2_lookup_params_entry *l2_lookup_params;
2842         struct sja1105_general_params_entry *general_params;
2843         struct sja1105_private *priv = ds->priv;
2844         enum sja1105_vlan_state state;
2845         struct sja1105_table *table;
2846         struct sja1105_rule *rule;
2847         bool want_tagging;
2848         u16 tpid, tpid2;
2849         int rc;
2850
2851         list_for_each_entry(rule, &priv->flow_block.rules, list) {
2852                 if (rule->type == SJA1105_RULE_VL) {
2853                         NL_SET_ERR_MSG_MOD(extack,
2854                                            "Cannot change VLAN filtering with active VL rules");
2855                         return -EBUSY;
2856                 }
2857         }
2858
2859         if (enabled) {
2860                 /* Enable VLAN filtering. */
2861                 tpid  = ETH_P_8021Q;
2862                 tpid2 = ETH_P_8021AD;
2863         } else {
2864                 /* Disable VLAN filtering. */
2865                 tpid  = ETH_P_SJA1105;
2866                 tpid2 = ETH_P_SJA1105;
2867         }
2868
2869         for (port = 0; port < ds->num_ports; port++) {
2870                 struct sja1105_port *sp = &priv->ports[port];
2871
2872                 if (enabled)
2873                         sp->xmit_tpid = priv->info->qinq_tpid;
2874                 else
2875                         sp->xmit_tpid = ETH_P_SJA1105;
2876         }
2877
2878         if (!enabled)
2879                 state = SJA1105_VLAN_UNAWARE;
2880         else if (priv->best_effort_vlan_filtering)
2881                 state = SJA1105_VLAN_BEST_EFFORT;
2882         else
2883                 state = SJA1105_VLAN_FILTERING_FULL;
2884
2885         if (priv->vlan_state == state)
2886                 return 0;
2887
2888         priv->vlan_state = state;
2889         want_tagging = (state == SJA1105_VLAN_UNAWARE ||
2890                         state == SJA1105_VLAN_BEST_EFFORT);
2891
2892         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
2893         general_params = table->entries;
2894         /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
2895         general_params->tpid = tpid;
2896         /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
2897         general_params->tpid2 = tpid2;
2898         /* When VLAN filtering is on, we need to at least be able to
2899          * decode management traffic through the "backup plan".
2900          */
2901         general_params->incl_srcpt1 = enabled;
2902         general_params->incl_srcpt0 = enabled;
2903
2904         want_tagging = priv->best_effort_vlan_filtering || !enabled;
2905
2906         /* VLAN filtering => independent VLAN learning.
2907          * No VLAN filtering (or best effort) => shared VLAN learning.
2908          *
2909          * In shared VLAN learning mode, untagged traffic still gets
2910          * pvid-tagged, and the FDB table gets populated with entries
2911          * containing the "real" (pvid or from VLAN tag) VLAN ID.
2912          * However the switch performs a masked L2 lookup in the FDB,
2913          * effectively only looking up a frame's DMAC (and not VID) for the
2914          * forwarding decision.
2915          *
2916          * This is extremely convenient for us, because in modes with
2917          * vlan_filtering=0, dsa_8021q actually installs a unique pvid on
2918          * each front-panel port. This is good for identification but breaks
2919          * learning badly - the VID of the learnt FDB entry is unique, i.e.
2920          * no frames coming from any other port are going to have it. So
2921          * for forwarding purposes, this is as though learning were broken
2922          * (all frames get flooded).
2923          */
2924         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
2925         l2_lookup_params = table->entries;
2926         l2_lookup_params->shared_learn = want_tagging;
2927
2928         sja1105_frame_memory_partitioning(priv);
2929
2930         rc = sja1105_build_vlan_table(priv, false);
2931         if (rc)
2932                 return rc;
2933
2934         rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
2935         if (rc)
2936                 NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");
2937
2938         /* Switch port identification based on 802.1Q is only viable
2939          * if we are not under a vlan_filtering bridge. So make sure
2940          * the two configurations are mutually exclusive (of course, the
2941          * user may know better, i.e. best_effort_vlan_filtering).
2942          */
2943         return sja1105_setup_8021q_tagging(ds, want_tagging);
2944 }
2945
2946 /* Returns number of VLANs added (0 or 1) on success,
2947  * or a negative error code.
2948  */
2949 static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
2950                                 u16 flags, struct list_head *vlan_list)
2951 {
2952         bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
2953         bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
2954         struct sja1105_bridge_vlan *v;
2955
2956         list_for_each_entry(v, vlan_list, list) {
2957                 if (v->port == port && v->vid == vid) {
2958                         /* Already added */
2959                         if (v->untagged == untagged && v->pvid == pvid)
2960                                 /* Nothing changed */
2961                                 return 0;
2962
2963                         /* It's the same VLAN, but some of the flags changed
2964                          * and the user did not bother to delete it first.
2965                          * Update it and trigger sja1105_build_vlan_table.
2966                          */
2967                         v->untagged = untagged;
2968                         v->pvid = pvid;
2969                         return 1;
2970                 }
2971         }
2972
2973         v = kzalloc(sizeof(*v), GFP_KERNEL);
2974         if (!v) {
2975                 dev_err(ds->dev, "Out of memory while storing VLAN\n");
2976                 return -ENOMEM;
2977         }
2978
2979         v->port = port;
2980         v->vid = vid;
2981         v->untagged = untagged;
2982         v->pvid = pvid;
2983         list_add(&v->list, vlan_list);
2984
2985         return 1;
2986 }
2987
2988 /* Returns number of VLANs deleted (0 or 1) */
2989 static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
2990                                 struct list_head *vlan_list)
2991 {
2992         struct sja1105_bridge_vlan *v, *n;
2993
2994         list_for_each_entry_safe(v, n, vlan_list, list) {
2995                 if (v->port == port && v->vid == vid) {
2996                         list_del(&v->list);
2997                         kfree(v);
2998                         return 1;
2999                 }
3000         }
3001
3002         return 0;
3003 }
3004
3005 static int sja1105_vlan_add(struct dsa_switch *ds, int port,
3006                             const struct switchdev_obj_port_vlan *vlan,
3007                             struct netlink_ext_ack *extack)
3008 {
3009         struct sja1105_private *priv = ds->priv;
3010         bool vlan_table_changed = false;
3011         int rc;
3012
3013         /* If the user wants best-effort VLAN filtering (aka vlan_filtering
3014          * bridge plus tagging), be sure to at least deny alterations to the
3015          * configuration done by dsa_8021q.
3016          */
3017         if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
3018             vid_is_dsa_8021q(vlan->vid)) {
3019                 NL_SET_ERR_MSG_MOD(extack,
3020                                    "Range 1024-3071 reserved for dsa_8021q operation");
3021                 return -EBUSY;
3022         }
3023
3024         rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
3025                                   &priv->bridge_vlans);
3026         if (rc < 0)
3027                 return rc;
3028         if (rc > 0)
3029                 vlan_table_changed = true;
3030
3031         if (!vlan_table_changed)
3032                 return 0;
3033
3034         return sja1105_build_vlan_table(priv, true);
3035 }
3036
3037 static int sja1105_vlan_del(struct dsa_switch *ds, int port,
3038                             const struct switchdev_obj_port_vlan *vlan)
3039 {
3040         struct sja1105_private *priv = ds->priv;
3041         bool vlan_table_changed = false;
3042         int rc;
3043
3044         rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
3045         if (rc > 0)
3046                 vlan_table_changed = true;
3047
3048         if (!vlan_table_changed)
3049                 return 0;
3050
3051         return sja1105_build_vlan_table(priv, true);
3052 }
3053
3054 static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
3055                                       u16 flags)
3056 {
3057         struct sja1105_private *priv = ds->priv;
3058         int rc;
3059
3060         rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
3061         if (rc <= 0)
3062                 return rc;
3063
3064         return sja1105_build_vlan_table(priv, true);
3065 }
3066
3067 static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
3068 {
3069         struct sja1105_private *priv = ds->priv;
3070         int rc;
3071
3072         rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
3073         if (!rc)
3074                 return 0;
3075
3076         return sja1105_build_vlan_table(priv, true);
3077 }
3078
3079 static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
3080         .vlan_add       = sja1105_dsa_8021q_vlan_add,
3081         .vlan_del       = sja1105_dsa_8021q_vlan_del,
3082 };
3083
3084 /* The programming model for the SJA1105 switch is "all-at-once" via static
3085  * configuration tables. Some of these can be dynamically modified at runtime,
3086  * but not the xMII mode parameters table.
3087  * Furthermore, some PHYs may not have crystals for generating their clocks
3088  * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
3089  * ref_clk pin. So port clocking needs to be initialized early, before
3090  * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
3091  * Setting the correct PHY link speed does not matter now.
3092  * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
3093  * bindings are not yet parsed by the DSA core. We need to parse them early
3094  * so that we can populate the xMII mode parameters table.
3095  */
3096 static int sja1105_setup(struct dsa_switch *ds)
3097 {
3098         struct sja1105_private *priv = ds->priv;
3099         int rc;
3100
3101         rc = sja1105_parse_dt(priv);
3102         if (rc < 0) {
3103                 dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
3104                 return rc;
3105         }
3106
3107         /* Error out early if internal delays are required through DT
3108          * and we can't apply them.
3109          */
3110         rc = sja1105_parse_rgmii_delays(priv);
3111         if (rc < 0) {
3112                 dev_err(ds->dev, "RGMII delay not supported\n");
3113                 return rc;
3114         }
3115
3116         rc = sja1105_ptp_clock_register(ds);
3117         if (rc < 0) {
3118                 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
3119                 return rc;
3120         }
3121
3122         rc = sja1105_mdiobus_register(ds);
3123         if (rc < 0) {
3124                 dev_err(ds->dev, "Failed to register MDIO bus: %pe\n",
3125                         ERR_PTR(rc));
3126                 goto out_ptp_clock_unregister;
3127         }
3128
3129         /* Create and send configuration down to device */
3130         rc = sja1105_static_config_load(priv);
3131         if (rc < 0) {
3132                 dev_err(ds->dev, "Failed to load static config: %d\n", rc);
3133                 goto out_mdiobus_unregister;
3134         }
3135         /* Configure the CGU (PHY link modes and speeds) */
3136         rc = priv->info->clocking_setup(priv);
3137         if (rc < 0) {
3138                 dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
3139                 goto out_static_config_free;
3140         }
3141         /* On SJA1105, VLAN filtering per se is always enabled in hardware.
3142          * The only thing we can do to disable it is lie about what the 802.1Q
3143          * EtherType is.
3144          * So it will still try to apply VLAN filtering, but all ingress
3145          * traffic (except frames received with EtherType of ETH_P_SJA1105)
3146          * will be internally tagged with a distorted VLAN header where the
3147          * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
3148          */
3149         ds->vlan_filtering_is_global = true;
3150
3151         /* Advertise the 8 egress queues */
3152         ds->num_tx_queues = SJA1105_NUM_TC;
3153
3154         ds->mtu_enforcement_ingress = true;
3155
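        /* Default to the mode where a vlan_filtering bridge and dsa_8021q
         * tagging can coexist; this can be changed at runtime via devlink.
         */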
3156         priv->best_effort_vlan_filtering = true;
3157
3158         rc = sja1105_devlink_setup(ds);
3159         if (rc < 0)
3160                 goto out_static_config_free;
3161
3162         /* The DSA/switchdev model brings up switch ports in standalone mode by
3163          * default, and that means vlan_filtering is 0 since they're not under
3164          * a bridge, so it's safe to set up switch tagging at this time.
3165          */
3166         rtnl_lock();
3167         rc = sja1105_setup_8021q_tagging(ds, true);
3168         rtnl_unlock();
3169         if (rc)
3170                 goto out_devlink_teardown;
3171
3172         return 0;
3173
3174 out_devlink_teardown:
3175         sja1105_devlink_teardown(ds);
3176 out_mdiobus_unregister:
3177         sja1105_mdiobus_unregister(ds);
3178 out_ptp_clock_unregister:
3179         sja1105_ptp_clock_unregister(ds);
3180 out_static_config_free:
3181         sja1105_static_config_free(&priv->static_config);
3182
3183         return rc;
3184 }
3185
3186 static void sja1105_teardown(struct dsa_switch *ds)
3187 {
3188         struct sja1105_private *priv = ds->priv;
3189         struct sja1105_bridge_vlan *v, *n;
3190         int port;
3191
3192         for (port = 0; port < ds->num_ports; port++) {
3193                 struct sja1105_port *sp = &priv->ports[port];
3194
3195                 if (!dsa_is_user_port(ds, port))
3196                         continue;
3197
3198                 if (sp->xmit_worker)
3199                         kthread_destroy_worker(sp->xmit_worker);
3200         }
3201
3202         sja1105_devlink_teardown(ds);
3203         sja1105_flower_teardown(ds);
3204         sja1105_tas_teardown(ds);
3205         sja1105_ptp_clock_unregister(ds);
3206         sja1105_static_config_free(&priv->static_config);
3207
3208         list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
3209                 list_del(&v->list);
3210                 kfree(v);
3211         }
3212
3213         list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
3214                 list_del(&v->list);
3215                 kfree(v);
3216         }
3217 }
3218
3219 static void sja1105_port_disable(struct dsa_switch *ds, int port)
3220 {
3221         struct sja1105_private *priv = ds->priv;
3222         struct sja1105_port *sp = &priv->ports[port];
3223
3224         if (!dsa_is_user_port(ds, port))
3225                 return;
3226
3227         kthread_cancel_work_sync(&sp->xmit_work);
3228         skb_queue_purge(&sp->xmit_queue);
3229 }
3230
3231 static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
3232                              struct sk_buff *skb, bool takets)
3233 {
3234         struct sja1105_mgmt_entry mgmt_route = {0};
3235         struct sja1105_private *priv = ds->priv;
3236         struct ethhdr *hdr;
3237         int timeout = 10;
3238         int rc;
3239
3240         hdr = eth_hdr(skb);
3241
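        /* Set up a one-shot management route matching on the frame's DMAC,
         * which forces it out through @port instead of the normal FDB lookup.
         */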
3242         mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
3243         mgmt_route.destports = BIT(port);
3244         mgmt_route.enfport = 1;
3245         mgmt_route.tsreg = 0;
3246         mgmt_route.takets = takets;
3247
3248         rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
3249                                           slot, &mgmt_route, true);
3250         if (rc < 0) {
3251                 kfree_skb(skb);
3252                 return rc;
3253         }
3254
3255         /* Transfer skb to the host port. */
3256         dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
3257
3258         /* Wait until the switch has processed the frame */
3259         do {
3260                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
3261                                                  slot, &mgmt_route);
3262                 if (rc < 0) {
3263                         dev_err_ratelimited(priv->ds->dev,
3264                                             "failed to poll for mgmt route\n");
3265                         continue;
3266                 }
3267
3268                 /* UM10944: The ENFPORT flag of the respective entry is
3269                  * cleared when a match is found. The host can use this
3270                  * flag as an acknowledgment.
3271                  */
3272                 cpu_relax();
3273         } while (mgmt_route.enfport && --timeout);
3274
3275         if (!timeout) {
3276                 /* Clean up the management route so that a follow-up
3277                  * frame may not match on it by mistake.
3278                  * This is only supported in hardware on P/Q/R/S - on E/T it is
3279                  * a no-op and we silently discard the -EOPNOTSUPP.
3280                  */
3281                 sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
3282                                              slot, &mgmt_route, false);
3283                 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
3284         }
3285
3286         return NETDEV_TX_OK;
3287 }
3288
3289 #define work_to_port(work) \
3290                 container_of((work), struct sja1105_port, xmit_work)
3291 #define tagger_to_sja1105(t) \
3292                 container_of((t), struct sja1105_private, tagger_data)
3293
3294 /* Deferred work is unfortunately necessary because setting up the management
3295  * route cannot be done from atomic context (SPI transfer takes a sleepable
3296  * lock on the bus).
3297  */
3298 static void sja1105_port_deferred_xmit(struct kthread_work *work)
3299 {
3300         struct sja1105_port *sp = work_to_port(work);
3301         struct sja1105_tagger_data *tagger_data = sp->data;
3302         struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
3303         int port = sp - priv->ports;
3304         struct sk_buff *skb;
3305
3306         while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
3307                 struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
3308
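                /* The management route slots are a shared resource, so
                 * serialize their use across ports.
                 */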
3309                 mutex_lock(&priv->mgmt_lock);
3310
3311                 sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
3312
3313                 /* The clone, if there, was made by dsa_skb_tx_timestamp */
3314                 if (clone)
3315                         sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
3316
3317                 mutex_unlock(&priv->mgmt_lock);
3318         }
3319 }
3320
3321 /* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
3322  * which cannot be reconfigured at runtime. So a switch reset is required.
3323  */
3324 static int sja1105_set_ageing_time(struct dsa_switch *ds,
3325                                    unsigned int ageing_time)
3326 {
3327         struct sja1105_l2_lookup_params_entry *l2_lookup_params;
3328         struct sja1105_private *priv = ds->priv;
3329         struct sja1105_table *table;
3330         unsigned int maxage;
3331
3332         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
3333         l2_lookup_params = table->entries;
3334
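        /* Convert the ageing time from milliseconds into the time base used
         * by the hardware MAXAGE field.
         */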
3335         maxage = SJA1105_AGEING_TIME_MS(ageing_time);
3336
3337         if (l2_lookup_params->maxage == maxage)
3338                 return 0;
3339
3340         l2_lookup_params->maxage = maxage;
3341
3342         return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
3343 }
3344
3345 static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
3346 {
3347         struct sja1105_l2_policing_entry *policing;
3348         struct sja1105_private *priv = ds->priv;
3349
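        /* The L2 policing MAXLEN field counts the entire frame on the wire,
         * so add the Ethernet header, VLAN tag and FCS overhead back on top
         * of the L2 payload size. The CPU port carries one extra VLAN tag
         * used for DSA tagging.
         */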
3350         new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
3351
3352         if (dsa_is_cpu_port(ds, port))
3353                 new_mtu += VLAN_HLEN;
3354
3355         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3356
3357         if (policing[port].maxlen == new_mtu)
3358                 return 0;
3359
3360         policing[port].maxlen = new_mtu;
3361
3362         return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3363 }
3364
3365 static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
3366 {
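        /* Report the largest L2 payload that fits in a 2043 octet frame once
         * sja1105_change_mtu() adds the header and FCS overhead back.
         */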
3367         return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
3368 }
3369
3370 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
3371                                  enum tc_setup_type type,
3372                                  void *type_data)
3373 {
3374         switch (type) {
3375         case TC_SETUP_QDISC_TAPRIO:
3376                 return sja1105_setup_tc_taprio(ds, port, type_data);
3377         case TC_SETUP_QDISC_CBS:
3378                 return sja1105_setup_tc_cbs(ds, port, type_data);
3379         default:
3380                 return -EOPNOTSUPP;
3381         }
3382 }
3383
3384 /* We have a single mirror (@to) port, but can configure ingress and egress
3385  * mirroring on all other (@from) ports.
3386  * We need to allow mirroring rules only as long as the @to port is always the
3387  * same, and we need to unset the @to port from mirr_port only when there is no
3388  * mirroring rule that references it.
3389  */
3390 static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
3391                                 bool ingress, bool enabled)
3392 {
3393         struct sja1105_general_params_entry *general_params;
3394         struct sja1105_mac_config_entry *mac;
3395         struct dsa_switch *ds = priv->ds;
3396         struct sja1105_table *table;
3397         bool already_enabled;
3398         u64 new_mirr_port;
3399         int rc;
3400
3401         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
3402         general_params = table->entries;
3403
3404         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
3405
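        /* An out-of-range value (ds->num_ports) in mirr_port denotes that no
         * mirror port is currently configured.
         */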
3406         already_enabled = (general_params->mirr_port != ds->num_ports);
3407         if (already_enabled && enabled && general_params->mirr_port != to) {
3408                 dev_err(priv->ds->dev,
3409                         "Delete mirroring rules towards port %llu first\n",
3410                         general_params->mirr_port);
3411                 return -EBUSY;
3412         }
3413
3414         new_mirr_port = to;
3415         if (!enabled) {
3416                 bool keep = false;
3417                 int port;
3418
3419                 /* Anybody still referencing mirr_port? */
3420                 for (port = 0; port < ds->num_ports; port++) {
3421                         if (mac[port].ing_mirr || mac[port].egr_mirr) {
3422                                 keep = true;
3423                                 break;
3424                         }
3425                 }
3426                 /* Unset already_enabled for next time */
3427                 if (!keep)
3428                         new_mirr_port = ds->num_ports;
3429         }
3430         if (new_mirr_port != general_params->mirr_port) {
3431                 general_params->mirr_port = new_mirr_port;
3432
3433                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
3434                                                   0, general_params, true);
3435                 if (rc < 0)
3436                         return rc;
3437         }
3438
3439         if (ingress)
3440                 mac[from].ing_mirr = enabled;
3441         else
3442                 mac[from].egr_mirr = enabled;
3443
3444         return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
3445                                             &mac[from], true);
3446 }
3447
3448 static int sja1105_mirror_add(struct dsa_switch *ds, int port,
3449                               struct dsa_mall_mirror_tc_entry *mirror,
3450                               bool ingress)
3451 {
3452         return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3453                                     ingress, true);
3454 }
3455
3456 static void sja1105_mirror_del(struct dsa_switch *ds, int port,
3457                                struct dsa_mall_mirror_tc_entry *mirror)
3458 {
3459         sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3460                              mirror->ingress, false);
3461 }
3462
3463 static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
3464                                     struct dsa_mall_policer_tc_entry *policer)
3465 {
3466         struct sja1105_l2_policing_entry *policing;
3467         struct sja1105_private *priv = ds->priv;
3468
3469         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3470
3471         /* In hardware, every 8 microseconds the credit level is incremented by
3472          * the value of RATE bytes divided by 64, up to a maximum of SMAX
3473          * bytes.
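         * For example, a policer rate of 1 Gbit/s (125000000 bytes/s) maps
         * to a RATE value of 512 * 125000000 / 1000000 = 64000.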
3474          */
3475         policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
3476                                       1000000);
3477         policing[port].smax = policer->burst;
3478
3479         return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3480 }
3481
3482 static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
3483 {
3484         struct sja1105_l2_policing_entry *policing;
3485         struct sja1105_private *priv = ds->priv;
3486
3487         policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3488
3489         policing[port].rate = SJA1105_RATE_MBPS(1000);
3490         policing[port].smax = 65535;
3491
3492         sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3493 }
3494
3495 static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
3496                                      bool enabled)
3497 {
3498         struct sja1105_mac_config_entry *mac;
3499         int rc;
3500
3501         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
3502
3503         mac[port].dyn_learn = enabled;
3504
3505         rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
3506                                           &mac[port], true);
3507         if (rc)
3508                 return rc;
3509
3510         if (enabled)
3511                 priv->learn_ena |= BIT(port);
3512         else
3513                 priv->learn_ena &= ~BIT(port);
3514
3515         return 0;
3516 }
3517
3518 static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
3519                                           struct switchdev_brport_flags flags)
3520 {
3521         if (flags.mask & BR_FLOOD) {
3522                 if (flags.val & BR_FLOOD)
3523                         priv->ucast_egress_floods |= BIT(to);
3524                 else
3525                         priv->ucast_egress_floods &= ~BIT(to);
3526         }
3527
3528         if (flags.mask & BR_BCAST_FLOOD) {
3529                 if (flags.val & BR_BCAST_FLOOD)
3530                         priv->bcast_egress_floods |= BIT(to);
3531                 else
3532                         priv->bcast_egress_floods &= ~BIT(to);
3533         }
3534
3535         return sja1105_manage_flood_domains(priv);
3536 }
3537
3538 static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
3539                                     struct switchdev_brport_flags flags,
3540                                     struct netlink_ext_ack *extack)
3541 {
3542         struct sja1105_l2_lookup_entry *l2_lookup;
3543         struct sja1105_table *table;
3544         int match;
3545
3546         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
3547         l2_lookup = table->entries;
3548
3549         for (match = 0; match < table->entry_count; match++)
3550                 if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
3551                     l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
3552                         break;
3553
3554         if (match == table->entry_count) {
3555                 NL_SET_ERR_MSG_MOD(extack,
3556                                    "Could not find FDB entry for unknown multicast");
3557                 return -ENOSPC;
3558         }
3559
3560         if (flags.val & BR_MCAST_FLOOD)
3561                 l2_lookup[match].destports |= BIT(to);
3562         else
3563                 l2_lookup[match].destports &= ~BIT(to);
3564
3565         return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
3566                                             l2_lookup[match].index,
3567                                             &l2_lookup[match],
3568                                             true);
3569 }
3570
3571 static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
3572                                          struct switchdev_brport_flags flags,
3573                                          struct netlink_ext_ack *extack)
3574 {
3575         struct sja1105_private *priv = ds->priv;
3576
3577         if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
3578                            BR_BCAST_FLOOD))
3579                 return -EINVAL;
3580
3581         if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
3582             !priv->info->can_limit_mcast_flood) {
3583                 bool multicast = !!(flags.val & BR_MCAST_FLOOD);
3584                 bool unicast = !!(flags.val & BR_FLOOD);
3585
3586                 if (unicast != multicast) {
3587                         NL_SET_ERR_MSG_MOD(extack,
3588                                            "This chip cannot configure multicast flooding independently of unicast");
3589                         return -EINVAL;
3590                 }
3591         }
3592
3593         return 0;
3594 }
3595
3596 static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
3597                                      struct switchdev_brport_flags flags,
3598                                      struct netlink_ext_ack *extack)
3599 {
3600         struct sja1105_private *priv = ds->priv;
3601         int rc;
3602
3603         if (flags.mask & BR_LEARNING) {
3604                 bool learn_ena = !!(flags.val & BR_LEARNING);
3605
3606                 rc = sja1105_port_set_learning(priv, port, learn_ena);
3607                 if (rc)
3608                         return rc;
3609         }
3610
3611         if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
3612                 rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
3613                 if (rc)
3614                         return rc;
3615         }
3616
3617         /* For chips that can't offload BR_MCAST_FLOOD independently, there
3618          * is nothing to do here; we already ensured the configuration is in sync
3619          * by offloading BR_FLOOD.
3620          */
3621         if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
3622                 rc = sja1105_port_mcast_flood(priv, port, flags,
3623                                               extack);
3624                 if (rc)
3625                         return rc;
3626         }
3627
3628         return 0;
3629 }
3630
3631 static const struct dsa_switch_ops sja1105_switch_ops = {
3632         .get_tag_protocol       = sja1105_get_tag_protocol,
3633         .setup                  = sja1105_setup,
3634         .teardown               = sja1105_teardown,
3635         .set_ageing_time        = sja1105_set_ageing_time,
3636         .port_change_mtu        = sja1105_change_mtu,
3637         .port_max_mtu           = sja1105_get_max_mtu,
3638         .phylink_validate       = sja1105_phylink_validate,
3639         .phylink_mac_link_state = sja1105_mac_pcs_get_state,
3640         .phylink_mac_config     = sja1105_mac_config,
3641         .phylink_mac_link_up    = sja1105_mac_link_up,
3642         .phylink_mac_link_down  = sja1105_mac_link_down,
3643         .get_strings            = sja1105_get_strings,
3644         .get_ethtool_stats      = sja1105_get_ethtool_stats,
3645         .get_sset_count         = sja1105_get_sset_count,
3646         .get_ts_info            = sja1105_get_ts_info,
3647         .port_disable           = sja1105_port_disable,
3648         .port_fdb_dump          = sja1105_fdb_dump,
3649         .port_fdb_add           = sja1105_fdb_add,
3650         .port_fdb_del           = sja1105_fdb_del,
3651         .port_bridge_join       = sja1105_bridge_join,
3652         .port_bridge_leave      = sja1105_bridge_leave,
3653         .port_pre_bridge_flags  = sja1105_port_pre_bridge_flags,
3654         .port_bridge_flags      = sja1105_port_bridge_flags,
3655         .port_stp_state_set     = sja1105_bridge_stp_state_set,
3656         .port_vlan_filtering    = sja1105_vlan_filtering,
3657         .port_vlan_add          = sja1105_vlan_add,
3658         .port_vlan_del          = sja1105_vlan_del,
3659         .port_mdb_add           = sja1105_mdb_add,
3660         .port_mdb_del           = sja1105_mdb_del,
3661         .port_hwtstamp_get      = sja1105_hwtstamp_get,
3662         .port_hwtstamp_set      = sja1105_hwtstamp_set,
3663         .port_rxtstamp          = sja1105_port_rxtstamp,
3664         .port_txtstamp          = sja1105_port_txtstamp,
3665         .port_setup_tc          = sja1105_port_setup_tc,
3666         .port_mirror_add        = sja1105_mirror_add,
3667         .port_mirror_del        = sja1105_mirror_del,
3668         .port_policer_add       = sja1105_port_policer_add,
3669         .port_policer_del       = sja1105_port_policer_del,
3670         .cls_flower_add         = sja1105_cls_flower_add,
3671         .cls_flower_del         = sja1105_cls_flower_del,
3672         .cls_flower_stats       = sja1105_cls_flower_stats,
3673         .crosschip_bridge_join  = sja1105_crosschip_bridge_join,
3674         .crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
3675         .devlink_param_get      = sja1105_devlink_param_get,
3676         .devlink_param_set      = sja1105_devlink_param_set,
3677         .devlink_info_get       = sja1105_devlink_info_get,
3678 };
3679
3680 static const struct of_device_id sja1105_dt_ids[];
3681
3682 static int sja1105_check_device_id(struct sja1105_private *priv)
3683 {
3684         const struct sja1105_regs *regs = priv->info->regs;
3685         u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
3686         struct device *dev = &priv->spidev->dev;
3687         const struct of_device_id *match;
3688         u32 device_id;
3689         u64 part_no;
3690         int rc;
3691
3692         rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
3693                               NULL);
3694         if (rc < 0)
3695                 return rc;
3696
3697         rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
3698                               SJA1105_SIZE_DEVICE_ID);
3699         if (rc < 0)
3700                 return rc;
3701
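        /* The part number is stored in bits 19:4 of the PROD_ID register */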
3702         sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
3703
3704         for (match = sja1105_dt_ids; match->compatible[0]; match++) {
3705                 const struct sja1105_info *info = match->data;
3706
3707                 /* Is what's been probed in our match table at all? */
3708                 if (info->device_id != device_id || info->part_no != part_no)
3709                         continue;
3710
3711                 /* But is it what's in the device tree? */
3712                 if (priv->info->device_id != device_id ||
3713                     priv->info->part_no != part_no) {
3714                         dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
3715                                  priv->info->name, info->name);
3716                         /* It isn't. No problem, pick that up. */
3717                         priv->info = info;
3718                 }
3719
3720                 return 0;
3721         }
3722
3723         dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
3724                 device_id, part_no);
3725
3726         return -ENODEV;
3727 }
3728
3729 static int sja1105_probe(struct spi_device *spi)
3730 {
3731         struct sja1105_tagger_data *tagger_data;
3732         struct device *dev = &spi->dev;
3733         struct sja1105_private *priv;
3734         size_t max_xfer, max_msg;
3735         struct dsa_switch *ds;
3736         int rc, port;
3737
3738         if (!dev->of_node) {
3739                 dev_err(dev, "No DTS bindings for SJA1105 driver\n");
3740                 return -EINVAL;
3741         }
3742
3743         priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
3744         if (!priv)
3745                 return -ENOMEM;
3746
3747         /* Configure the optional reset pin and bring up switch */
3748         priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
3749         if (IS_ERR(priv->reset_gpio))
3750                 dev_dbg(dev, "reset-gpios not defined, ignoring\n");
3751         else
3752                 sja1105_hw_reset(priv->reset_gpio, 1, 1);
3753
3754         /* Populate our driver private structure (priv) based on
3755          * the device tree node that was probed (spi)
3756          */
3757         priv->spidev = spi;
3758         spi_set_drvdata(spi, priv);
3759
3760         /* Configure the SPI bus */
3761         spi->bits_per_word = 8;
3762         rc = spi_setup(spi);
3763         if (rc < 0) {
3764                 dev_err(dev, "Could not init SPI\n");
3765                 return rc;
3766         }
3767
3768         /* In sja1105_xfer, we send spi_messages composed of two spi_transfers:
3769          * a small one for the message header and another one for the current
3770          * chunk of the packed buffer.
3771          * Check that the restrictions imposed by the SPI controller are
3772          * respected: the chunk buffer is smaller than the max transfer size,
3773          * and the total length of the chunk plus its message header is smaller
3774          * than the max message size.
3775          * We do that during probe time since the maximum transfer size is a
3776          * runtime invariant.
3777          */
3778         max_xfer = spi_max_transfer_size(spi);
3779         max_msg = spi_max_message_size(spi);
3780
3781         /* We need to send at least one 64-bit word of SPI payload per message
3782          * in order to be able to make useful progress.
3783          */
3784         if (max_msg < SJA1105_SIZE_SPI_MSG_HEADER + 8) {
3785                 dev_err(dev, "SPI master cannot send large enough buffers, aborting\n");
3786                 return -EINVAL;
3787         }
3788
3789         priv->max_xfer_len = SJA1105_SIZE_SPI_MSG_MAXLEN;
3790         if (priv->max_xfer_len > max_xfer)
3791                 priv->max_xfer_len = max_xfer;
3792         if (priv->max_xfer_len > max_msg - SJA1105_SIZE_SPI_MSG_HEADER)
3793                 priv->max_xfer_len = max_msg - SJA1105_SIZE_SPI_MSG_HEADER;
3794
3795         priv->info = of_device_get_match_data(dev);
3796
3797         /* Detect hardware device */
3798         rc = sja1105_check_device_id(priv);
3799         if (rc < 0) {
3800                 dev_err(dev, "Device ID check failed: %d\n", rc);
3801                 return rc;
3802         }
3803
3804         dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
3805
3806         ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
3807         if (!ds)
3808                 return -ENOMEM;
3809
3810         ds->dev = dev;
3811         ds->num_ports = priv->info->num_ports;
3812         ds->ops = &sja1105_switch_ops;
3813         ds->priv = priv;
3814         priv->ds = ds;
3815
3816         tagger_data = &priv->tagger_data;
3817
3818         mutex_init(&priv->ptp_data.lock);
3819         mutex_init(&priv->mgmt_lock);
3820
3821         priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
3822                                            GFP_KERNEL);
3823         if (!priv->dsa_8021q_ctx)
3824                 return -ENOMEM;
3825
3826         priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
3827         priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
3828         priv->dsa_8021q_ctx->ds = ds;
3829
3830         INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
3831         INIT_LIST_HEAD(&priv->bridge_vlans);
3832         INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
3833
3834         sja1105_tas_setup(ds);
3835         sja1105_flower_setup(ds);
3836
3837         rc = dsa_register_switch(priv->ds);
3838         if (rc)
3839                 return rc;
3840
3841         if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
3842                 priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
3843                                          sizeof(struct sja1105_cbs_entry),
3844                                          GFP_KERNEL);
3845                 if (!priv->cbs) {
3846                         rc = -ENOMEM;
3847                         goto out_unregister_switch;
3848                 }
3849         }
3850
3851         /* Connections between dsa_port and sja1105_port */
3852         for (port = 0; port < ds->num_ports; port++) {
3853                 struct sja1105_port *sp = &priv->ports[port];
3854                 struct dsa_port *dp = dsa_to_port(ds, port);
3855                 struct net_device *slave;
3856                 int subvlan;
3857
3858                 if (!dsa_is_user_port(ds, port))
3859                         continue;
3860
3861                 dp->priv = sp;
3862                 sp->dp = dp;
3863                 sp->data = tagger_data;
3864                 slave = dp->slave;
3865                 kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
3866                 sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
3867                                                         slave->name);
3868                 if (IS_ERR(sp->xmit_worker)) {
3869                         rc = PTR_ERR(sp->xmit_worker);
3870                         dev_err(ds->dev,
3871                                 "failed to create deferred xmit thread: %d\n",
3872                                 rc);
3873                         goto out_destroy_workers;
3874                 }
3875                 skb_queue_head_init(&sp->xmit_queue);
3876                 sp->xmit_tpid = ETH_P_SJA1105;
3877
3878                 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
3879                         sp->subvlan_map[subvlan] = VLAN_N_VID;
3880         }
3881
3882         return 0;
3883
3884 out_destroy_workers:
3885         while (port-- > 0) {
3886                 struct sja1105_port *sp = &priv->ports[port];
3887
3888                 if (!dsa_is_user_port(ds, port))
3889                         continue;
3890
3891                 kthread_destroy_worker(sp->xmit_worker);
3892         }
3893
3894 out_unregister_switch:
3895         dsa_unregister_switch(ds);
3896
3897         return rc;
3898 }
3899
3900 static int sja1105_remove(struct spi_device *spi)
3901 {
3902         struct sja1105_private *priv = spi_get_drvdata(spi);
3903
3904         dsa_unregister_switch(priv->ds);
3905         return 0;
3906 }
3907
3908 static const struct of_device_id sja1105_dt_ids[] = {
3909         { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
3910         { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
3911         { .compatible = "nxp,sja1105p", .data = &sja1105p_info },
3912         { .compatible = "nxp,sja1105q", .data = &sja1105q_info },
3913         { .compatible = "nxp,sja1105r", .data = &sja1105r_info },
3914         { .compatible = "nxp,sja1105s", .data = &sja1105s_info },
3915         { .compatible = "nxp,sja1110a", .data = &sja1110a_info },
3916         { .compatible = "nxp,sja1110b", .data = &sja1110b_info },
3917         { .compatible = "nxp,sja1110c", .data = &sja1110c_info },
3918         { .compatible = "nxp,sja1110d", .data = &sja1110d_info },
3919         { /* sentinel */ },
3920 };
3921 MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
3922
3923 static struct spi_driver sja1105_driver = {
3924         .driver = {
3925                 .name  = "sja1105",
3926                 .owner = THIS_MODULE,
3927                 .of_match_table = of_match_ptr(sja1105_dt_ids),
3928         },
3929         .probe  = sja1105_probe,
3930         .remove = sja1105_remove,
3931 };
3932
3933 module_spi_driver(sja1105_driver);
3934
3935 MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
3936 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
3937 MODULE_DESCRIPTION("SJA1105 Driver");
3938 MODULE_LICENSE("GPL v2");