1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
4  * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
5  * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
6  * Copyright (c) 2016 John Crispin <john@phrozen.org>
7  */
8
9 #include <linux/module.h>
10 #include <linux/phy.h>
11 #include <linux/netdevice.h>
12 #include <linux/bitfield.h>
13 #include <linux/regmap.h>
14 #include <net/dsa.h>
15 #include <linux/of_net.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_platform.h>
18 #include <linux/if_bridge.h>
19 #include <linux/mdio.h>
20 #include <linux/phylink.h>
21 #include <linux/gpio/consumer.h>
22 #include <linux/etherdevice.h>
23 #include <linux/dsa/tag_qca.h>
24
25 #include "qca8k.h"
26
27 #define MIB_DESC(_s, _o, _n)    \
28         {                       \
29                 .size = (_s),   \
30                 .offset = (_o), \
31                 .name = (_n),   \
32         }
33
34 static const struct qca8k_mib_desc ar8327_mib[] = {
35         MIB_DESC(1, 0x00, "RxBroad"),
36         MIB_DESC(1, 0x04, "RxPause"),
37         MIB_DESC(1, 0x08, "RxMulti"),
38         MIB_DESC(1, 0x0c, "RxFcsErr"),
39         MIB_DESC(1, 0x10, "RxAlignErr"),
40         MIB_DESC(1, 0x14, "RxRunt"),
41         MIB_DESC(1, 0x18, "RxFragment"),
42         MIB_DESC(1, 0x1c, "Rx64Byte"),
43         MIB_DESC(1, 0x20, "Rx128Byte"),
44         MIB_DESC(1, 0x24, "Rx256Byte"),
45         MIB_DESC(1, 0x28, "Rx512Byte"),
46         MIB_DESC(1, 0x2c, "Rx1024Byte"),
47         MIB_DESC(1, 0x30, "Rx1518Byte"),
48         MIB_DESC(1, 0x34, "RxMaxByte"),
49         MIB_DESC(1, 0x38, "RxTooLong"),
50         MIB_DESC(2, 0x3c, "RxGoodByte"),
51         MIB_DESC(2, 0x44, "RxBadByte"),
52         MIB_DESC(1, 0x4c, "RxOverFlow"),
53         MIB_DESC(1, 0x50, "Filtered"),
54         MIB_DESC(1, 0x54, "TxBroad"),
55         MIB_DESC(1, 0x58, "TxPause"),
56         MIB_DESC(1, 0x5c, "TxMulti"),
57         MIB_DESC(1, 0x60, "TxUnderRun"),
58         MIB_DESC(1, 0x64, "Tx64Byte"),
59         MIB_DESC(1, 0x68, "Tx128Byte"),
60         MIB_DESC(1, 0x6c, "Tx256Byte"),
61         MIB_DESC(1, 0x70, "Tx512Byte"),
62         MIB_DESC(1, 0x74, "Tx1024Byte"),
63         MIB_DESC(1, 0x78, "Tx1518Byte"),
64         MIB_DESC(1, 0x7c, "TxMaxByte"),
65         MIB_DESC(1, 0x80, "TxOverSize"),
66         MIB_DESC(2, 0x84, "TxByte"),
67         MIB_DESC(1, 0x8c, "TxCollision"),
68         MIB_DESC(1, 0x90, "TxAbortCol"),
69         MIB_DESC(1, 0x94, "TxMultiCol"),
70         MIB_DESC(1, 0x98, "TxSingleCol"),
71         MIB_DESC(1, 0x9c, "TxExcDefer"),
72         MIB_DESC(1, 0xa0, "TxDefer"),
73         MIB_DESC(1, 0xa4, "TxLateCol"),
74         MIB_DESC(1, 0xa8, "RXUnicast"),
75         MIB_DESC(1, 0xac, "TXUnicast"),
76 };
77
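/* Split a 32-bit switch register address into the page number and the two
 * MII register offsets (r1, r2) used to reach it over the MDIO bus: the
 * page is selected through a write to the pseudo PHY at address 0x18, and
 * the 32-bit value is then accessed at PHY address (0x10 | r2), register r1
 * (low 16 bits) and r1 + 1 (high 16 bits).
 */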
78 static void
79 qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
80 {
81         regaddr >>= 1;
82         *r1 = regaddr & 0x1e;
83
84         regaddr >>= 5;
85         *r2 = regaddr & 0x7;
86
87         regaddr >>= 3;
88         *page = regaddr & 0x3ff;
89 }
90
91 static int
92 qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
93 {
94         u16 *cached_lo = &priv->mdio_cache.lo;
95         struct mii_bus *bus = priv->bus;
96         int ret;
97
98         if (lo == *cached_lo)
99                 return 0;
100
101         ret = bus->write(bus, phy_id, regnum, lo);
102         if (ret < 0)
103                 dev_err_ratelimited(&bus->dev,
104                                     "failed to write qca8k 32bit lo register\n");
105
106         *cached_lo = lo;
107         return 0;
108 }
109
110 static int
111 qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
112 {
113         u16 *cached_hi = &priv->mdio_cache.hi;
114         struct mii_bus *bus = priv->bus;
115         int ret;
116
117         if (hi == *cached_hi)
118                 return 0;
119
120         ret = bus->write(bus, phy_id, regnum, hi);
121         if (ret < 0)
122                 dev_err_ratelimited(&bus->dev,
123                                     "failed to write qca8k 32bit hi register\n");
124
125         *cached_hi = hi;
126         return 0;
127 }
128
129 static int
130 qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
131 {
132         int ret;
133
134         ret = bus->read(bus, phy_id, regnum);
135         if (ret >= 0) {
136                 *val = ret;
137                 ret = bus->read(bus, phy_id, regnum + 1);
138                 *val |= ret << 16;
139         }
140
141         if (ret < 0) {
142                 dev_err_ratelimited(&bus->dev,
143                                     "failed to read qca8k 32bit register\n");
144                 *val = 0;
145                 return ret;
146         }
147
148         return 0;
149 }
150
151 static void
152 qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
153 {
154         u16 lo, hi;
155         int ret;
156
157         lo = val & 0xffff;
158         hi = (u16)(val >> 16);
159
160         ret = qca8k_set_lo(priv, phy_id, regnum, lo);
161         if (ret >= 0)
162                 ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
163 }
164
165 static int
166 qca8k_set_page(struct qca8k_priv *priv, u16 page)
167 {
168         u16 *cached_page = &priv->mdio_cache.page;
169         struct mii_bus *bus = priv->bus;
170         int ret;
171
172         if (page == *cached_page)
173                 return 0;
174
175         ret = bus->write(bus, 0x18, 0, page);
176         if (ret < 0) {
177                 dev_err_ratelimited(&bus->dev,
178                                     "failed to set qca8k page\n");
179                 return ret;
180         }
181
182         *cached_page = page;
183         usleep_range(1000, 2000);
184         return 0;
185 }
186
187 static int
188 qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
189 {
190         return regmap_read(priv->regmap, reg, val);
191 }
192
193 static int
194 qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
195 {
196         return regmap_write(priv->regmap, reg, val);
197 }
198
199 static int
200 qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
201 {
202         return regmap_update_bits(priv->regmap, reg, mask, write_val);
203 }
204
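/* Handler invoked when the switch replies to an in-band (Ethernet)
 * register read/write request: copy back the returned data and complete
 * rw_done to wake up the waiting read/write function.
 */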
205 static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
206 {
207         struct qca8k_mgmt_eth_data *mgmt_eth_data;
208         struct qca8k_priv *priv = ds->priv;
209         struct qca_mgmt_ethhdr *mgmt_ethhdr;
210         u8 len, cmd;
211
212         mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
213         mgmt_eth_data = &priv->mgmt_eth_data;
214
215         cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
216         len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);
217
218         /* Make sure the seq matches the requested packet */
219         if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
220                 mgmt_eth_data->ack = true;
221
222         if (cmd == MDIO_READ) {
223                 mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;
224
225                 /* Get the rest of the 12 bytes of data.
226                  * The read/write function will extract the requested data.
227                  */
228                 if (len > QCA_HDR_MGMT_DATA1_LEN)
229                         memcpy(mgmt_eth_data->data + 1, skb->data,
230                                QCA_HDR_MGMT_DATA2_LEN);
231         }
232
233         complete(&mgmt_eth_data->rw_done);
234 }
235
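/* Build the management Ethernet frame carrying a register read/write
 * request in the QCA header; for writes, the value(s) are placed in the
 * header's mdio_data field and the following data words.
 */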
236 static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
237                                                int priority, unsigned int len)
238 {
239         struct qca_mgmt_ethhdr *mgmt_ethhdr;
240         unsigned int real_len;
241         struct sk_buff *skb;
242         u32 *data2;
243         u16 hdr;
244
245         skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
246         if (!skb)
247                 return NULL;
248
249         /* The max value for the len reg is 15 (0xf), but the switch actually
250          * returns 16 bytes. For some reason the steps are:
251          * 0: nothing
252          * 1-4: first 4 bytes
253          * 5-6: first 12 bytes
254          * 7-15: all 16 bytes
255          */
256         if (len == 16)
257                 real_len = 15;
258         else
259                 real_len = len;
260
261         skb_reset_mac_header(skb);
262         skb_set_network_header(skb, skb->len);
263
264         mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
265
266         hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
267         hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
268         hdr |= QCA_HDR_XMIT_FROM_CPU;
269         hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
270         hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
271
272         mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
273         mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
274         mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
275         mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
276                                            QCA_HDR_MGMT_CHECK_CODE_VAL);
277
278         if (cmd == MDIO_WRITE)
279                 mgmt_ethhdr->mdio_data = *val;
280
281         mgmt_ethhdr->hdr = htons(hdr);
282
283         data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
284         if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
285                 memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);
286
287         return skb;
288 }
289
290 static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
291 {
292         struct qca_mgmt_ethhdr *mgmt_ethhdr;
293
294         mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
295         mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
296 }
297
298 static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
299 {
300         struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
301         struct sk_buff *skb;
302         bool ack;
303         int ret;
304
305         skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
306                                       QCA8K_ETHERNET_MDIO_PRIORITY, len);
307         if (!skb)
308                 return -ENOMEM;
309
310         mutex_lock(&mgmt_eth_data->mutex);
311
312         /* Check if mgmt_master is operational */
313         if (!priv->mgmt_master) {
314                 kfree_skb(skb);
315                 mutex_unlock(&mgmt_eth_data->mutex);
316                 return -EINVAL;
317         }
318
319         skb->dev = priv->mgmt_master;
320
321         reinit_completion(&mgmt_eth_data->rw_done);
322
323         /* Increment seq_num and set it in the mdio pkt */
324         mgmt_eth_data->seq++;
325         qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
326         mgmt_eth_data->ack = false;
327
328         dev_queue_xmit(skb);
329
330         ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
331                                           msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
332
333         *val = mgmt_eth_data->data[0];
334         if (len > QCA_HDR_MGMT_DATA1_LEN)
335                 memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
336
337         ack = mgmt_eth_data->ack;
338
339         mutex_unlock(&mgmt_eth_data->mutex);
340
341         if (ret <= 0)
342                 return -ETIMEDOUT;
343
344         if (!ack)
345                 return -EINVAL;
346
347         return 0;
348 }
349
350 static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
351 {
352         struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
353         struct sk_buff *skb;
354         bool ack;
355         int ret;
356
357         skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
358                                       QCA8K_ETHERNET_MDIO_PRIORITY, len);
359         if (!skb)
360                 return -ENOMEM;
361
362         mutex_lock(&mgmt_eth_data->mutex);
363
364         /* Check if mgmt_master is operational */
365         if (!priv->mgmt_master) {
366                 kfree_skb(skb);
367                 mutex_unlock(&mgmt_eth_data->mutex);
368                 return -EINVAL;
369         }
370
371         skb->dev = priv->mgmt_master;
372
373         reinit_completion(&mgmt_eth_data->rw_done);
374
375         /* Increment seq_num and set it in the mdio pkt */
376         mgmt_eth_data->seq++;
377         qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
378         mgmt_eth_data->ack = false;
379
380         dev_queue_xmit(skb);
381
382         ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
383                                           msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
384
385         ack = mgmt_eth_data->ack;
386
387         mutex_unlock(&mgmt_eth_data->mutex);
388
389         if (ret <= 0)
390                 return -ETIMEDOUT;
391
392         if (!ack)
393                 return -EINVAL;
394
395         return 0;
396 }
397
398 static int
399 qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
400 {
401         u32 val = 0;
402         int ret;
403
404         ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
405         if (ret)
406                 return ret;
407
408         val &= ~mask;
409         val |= write_val;
410
411         return qca8k_write_eth(priv, reg, &val, sizeof(val));
412 }
413
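/* Bulk read/write of consecutive 32-bit registers (e.g. the 3-word ATU
 * entries): use the Ethernet management path when available and fall back
 * to per-word regmap accesses otherwise.
 */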
414 static int
415 qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
416 {
417         int i, count = len / sizeof(u32), ret;
418
419         if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
420                 return 0;
421
422         for (i = 0; i < count; i++) {
423                 ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
424                 if (ret < 0)
425                         return ret;
426         }
427
428         return 0;
429 }
430
431 static int
432 qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
433 {
434         int i, count = len / sizeof(u32), ret;
435         u32 tmp;
436
437         if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
438                 return 0;
439
440         for (i = 0; i < count; i++) {
441                 tmp = val[i];
442
443                 ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
444                 if (ret < 0)
445                         return ret;
446         }
447
448         return 0;
449 }
450
451 static int
452 qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
453 {
454         struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
455         struct mii_bus *bus = priv->bus;
456         u16 r1, r2, page;
457         int ret;
458
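        /* Try the Ethernet management path first, fall back to MDIO */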
459         if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
460                 return 0;
461
462         qca8k_split_addr(reg, &r1, &r2, &page);
463
464         mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
465
466         ret = qca8k_set_page(priv, page);
467         if (ret < 0)
468                 goto exit;
469
470         ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
471
472 exit:
473         mutex_unlock(&bus->mdio_lock);
474         return ret;
475 }
476
477 static int
478 qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
479 {
480         struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
481         struct mii_bus *bus = priv->bus;
482         u16 r1, r2, page;
483         int ret;
484
485         if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
486                 return 0;
487
488         qca8k_split_addr(reg, &r1, &r2, &page);
489
490         mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
491
492         ret = qca8k_set_page(priv, page);
493         if (ret < 0)
494                 goto exit;
495
496         qca8k_mii_write32(priv, 0x10 | r2, r1, val);
497
498 exit:
499         mutex_unlock(&bus->mdio_lock);
500         return ret;
501 }
502
503 static int
504 qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
505 {
506         struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
507         struct mii_bus *bus = priv->bus;
508         u16 r1, r2, page;
509         u32 val;
510         int ret;
511
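        /* Try the Ethernet management path first, fall back to an MDIO
         * read-modify-write sequence.
         */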
512         if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
513                 return 0;
514
515         qca8k_split_addr(reg, &r1, &r2, &page);
516
517         mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
518
519         ret = qca8k_set_page(priv, page);
520         if (ret < 0)
521                 goto exit;
522
523         ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
524         if (ret < 0)
525                 goto exit;
526
527         val &= ~mask;
528         val |= write_val;
529         qca8k_mii_write32(priv, 0x10 | r2, r1, val);
530
531 exit:
532         mutex_unlock(&bus->mdio_lock);
533
534         return ret;
535 }
536
537 static const struct regmap_range qca8k_readable_ranges[] = {
538         regmap_reg_range(0x0000, 0x00e4), /* Global control */
539         regmap_reg_range(0x0100, 0x0168), /* EEE control */
540         regmap_reg_range(0x0200, 0x0270), /* Parser control */
541         regmap_reg_range(0x0400, 0x0454), /* ACL */
542         regmap_reg_range(0x0600, 0x0718), /* Lookup */
543         regmap_reg_range(0x0800, 0x0b70), /* QM */
544         regmap_reg_range(0x0c00, 0x0c80), /* PKT */
545         regmap_reg_range(0x0e00, 0x0e98), /* L3 */
546         regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
547         regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
548         regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
549         regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
550         regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
551         regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
552         regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
553
554 };
555
556 static const struct regmap_access_table qca8k_readable_table = {
557         .yes_ranges = qca8k_readable_ranges,
558         .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
559 };
560
561 static struct regmap_config qca8k_regmap_config = {
562         .reg_bits = 16,
563         .val_bits = 32,
564         .reg_stride = 4,
565         .max_register = 0x16ac, /* end MIB - Port6 range */
566         .reg_read = qca8k_regmap_read,
567         .reg_write = qca8k_regmap_write,
568         .reg_update_bits = qca8k_regmap_update_bits,
569         .rd_table = &qca8k_readable_table,
570         .disable_locking = true, /* Locking is handled by qca8k read/write */
571         .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
572 };
573
574 static int
575 qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
576 {
577         u32 val;
578
579         return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
580                                        QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
581 }
582
583 static int
584 qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
585 {
586         u32 reg[3];
587         int ret;
588
589         /* load the ARL table into an array */
590         ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
591         if (ret)
592                 return ret;
593
594         /* vid - 83:72 */
595         fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
596         /* aging - 67:64 */
597         fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
598         /* portmask - 54:48 */
599         fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
600         /* mac - 47:0 */
601         fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
602         fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
603         fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
604         fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
605         fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
606         fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
607
608         return 0;
609 }
610
611 static void
612 qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
613                 u8 aging)
614 {
615         u32 reg[3] = { 0 };
616
617         /* vid - 83:72 */
618         reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
619         /* aging - 67:64 */
620         reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
621         /* portmask - 54:48 */
622         reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
623         /* mac - 47:0 */
624         reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
625         reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
626         reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
627         reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
628         reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
629         reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
630
631         /* load the array into the ARL table */
632         qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
633 }
634
635 static int
636 qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
637 {
638         u32 reg;
639         int ret;
640
641         /* Set the command and FDB index */
642         reg = QCA8K_ATU_FUNC_BUSY;
643         reg |= cmd;
644         if (port >= 0) {
645                 reg |= QCA8K_ATU_FUNC_PORT_EN;
646                 reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
647         }
648
649         /* Write the function register triggering the table access */
650         ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
651         if (ret)
652                 return ret;
653
654         /* wait for completion */
655         ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
656         if (ret)
657                 return ret;
658
659         /* Check for table full violation when adding an entry */
660         if (cmd == QCA8K_FDB_LOAD) {
661                 ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
662                 if (ret < 0)
663                         return ret;
664                 if (reg & QCA8K_ATU_FUNC_FULL)
665                         return -1;
666         }
667
668         return 0;
669 }
670
671 static int
672 qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
673 {
674         int ret;
675
676         qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
677         ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
678         if (ret < 0)
679                 return ret;
680
681         return qca8k_fdb_read(priv, fdb);
682 }
683
684 static int
685 qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
686               u16 vid, u8 aging)
687 {
688         int ret;
689
690         mutex_lock(&priv->reg_mutex);
691         qca8k_fdb_write(priv, vid, port_mask, mac, aging);
692         ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
693         mutex_unlock(&priv->reg_mutex);
694
695         return ret;
696 }
697
698 static int
699 qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
700 {
701         int ret;
702
703         mutex_lock(&priv->reg_mutex);
704         qca8k_fdb_write(priv, vid, port_mask, mac, 0);
705         ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
706         mutex_unlock(&priv->reg_mutex);
707
708         return ret;
709 }
710
711 static void
712 qca8k_fdb_flush(struct qca8k_priv *priv)
713 {
714         mutex_lock(&priv->reg_mutex);
715         qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
716         mutex_unlock(&priv->reg_mutex);
717 }
718
719 static int
720 qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
721                             const u8 *mac, u16 vid)
722 {
723         struct qca8k_fdb fdb = { 0 };
724         int ret;
725
726         mutex_lock(&priv->reg_mutex);
727
728         qca8k_fdb_write(priv, vid, 0, mac, 0);
729         ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
730         if (ret < 0)
731                 goto exit;
732
733         ret = qca8k_fdb_read(priv, &fdb);
734         if (ret < 0)
735                 goto exit;
736
737         /* Rule exists. Delete first */
738         if (!fdb.aging) {
739                 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
740                 if (ret)
741                         goto exit;
742         }
743
744         /* Add port to fdb portmask */
745         fdb.port_mask |= port_mask;
746
747         qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
748         ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
749
750 exit:
751         mutex_unlock(&priv->reg_mutex);
752         return ret;
753 }
754
755 static int
756 qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
757                          const u8 *mac, u16 vid)
758 {
759         struct qca8k_fdb fdb = { 0 };
760         int ret;
761
762         mutex_lock(&priv->reg_mutex);
763
764         qca8k_fdb_write(priv, vid, 0, mac, 0);
765         ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
766         if (ret < 0)
767                 goto exit;
768
769         /* Rule doesn't exist. Why delete? */
770         if (!fdb.aging) {
771                 ret = -EINVAL;
772                 goto exit;
773         }
774
775         ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
776         if (ret)
777                 goto exit;
778
779         /* The only port in the rule is this port. Don't re-insert */
780         if (fdb.port_mask == port_mask)
781                 goto exit;
782
783         /* Remove port from port mask */
784         fdb.port_mask &= ~port_mask;
785
786         qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
787         ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
788
789 exit:
790         mutex_unlock(&priv->reg_mutex);
791         return ret;
792 }
793
794 static int
795 qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
796 {
797         u32 reg;
798         int ret;
799
800         /* Set the command and VLAN index */
801         reg = QCA8K_VTU_FUNC1_BUSY;
802         reg |= cmd;
803         reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
804
805         /* Write the function register triggering the table access */
806         ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
807         if (ret)
808                 return ret;
809
810         /* wait for completion */
811         ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
812         if (ret)
813                 return ret;
814
815         /* Check for table full violation when adding an entry */
816         if (cmd == QCA8K_VLAN_LOAD) {
817                 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
818                 if (ret < 0)
819                         return ret;
820                 if (reg & QCA8K_VTU_FUNC1_FULL)
821                         return -ENOMEM;
822         }
823
824         return 0;
825 }
826
827 static int
828 qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
829 {
830         u32 reg;
831         int ret;
832
833         /*
834          * We do the right thing with VLAN 0 and treat it as untagged while
835          * preserving the tag on egress.
836          */
837         if (vid == 0)
838                 return 0;
839
840         mutex_lock(&priv->reg_mutex);
841         ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
842         if (ret < 0)
843                 goto out;
844
845         ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
846         if (ret < 0)
847                 goto out;
848         reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
849         reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
850         if (untagged)
851                 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
852         else
853                 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
854
855         ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
856         if (ret)
857                 goto out;
858         ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
859
860 out:
861         mutex_unlock(&priv->reg_mutex);
862
863         return ret;
864 }
865
866 static int
867 qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
868 {
869         u32 reg, mask;
870         int ret, i;
871         bool del;
872
873         mutex_lock(&priv->reg_mutex);
874         ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
875         if (ret < 0)
876                 goto out;
877
878         ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
879         if (ret < 0)
880                 goto out;
881         reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
882         reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
883
884         /* Check if we're the last member to be removed */
885         del = true;
886         for (i = 0; i < QCA8K_NUM_PORTS; i++) {
887                 mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
888
889                 if ((reg & mask) != mask) {
890                         del = false;
891                         break;
892                 }
893         }
894
895         if (del) {
896                 ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
897         } else {
898                 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
899                 if (ret)
900                         goto out;
901                 ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
902         }
903
904 out:
905         mutex_unlock(&priv->reg_mutex);
906
907         return ret;
908 }
909
910 static int
911 qca8k_mib_init(struct qca8k_priv *priv)
912 {
913         int ret;
914
915         mutex_lock(&priv->reg_mutex);
916         ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
917                                  QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
918                                  FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
919                                  QCA8K_MIB_BUSY);
920         if (ret)
921                 goto exit;
922
923         ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
924         if (ret)
925                 goto exit;
926
927         ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
928         if (ret)
929                 goto exit;
930
931         ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
932
933 exit:
934         mutex_unlock(&priv->reg_mutex);
935         return ret;
936 }
937
938 static void
939 qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
940 {
941         u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
942
943         /* Ports 0 and 6 have no internal PHY */
944         if (port > 0 && port < 6)
945                 mask |= QCA8K_PORT_STATUS_LINK_AUTO;
946
947         if (enable)
948                 regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
949         else
950                 regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
951 }
952
953 static int
954 qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
955                         struct sk_buff *read_skb, u32 *val)
956 {
957         struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
958         bool ack;
959         int ret;
960
961         reinit_completion(&mgmt_eth_data->rw_done);
962
963         /* Increment seq_num and set it in the copy pkt */
964         mgmt_eth_data->seq++;
965         qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
966         mgmt_eth_data->ack = false;
967
968         dev_queue_xmit(skb);
969
970         ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
971                                           QCA8K_ETHERNET_TIMEOUT);
972
973         ack = mgmt_eth_data->ack;
974
975         if (ret <= 0)
976                 return -ETIMEDOUT;
977
978         if (!ack)
979                 return -EINVAL;
980
981         *val = mgmt_eth_data->data[0];
982
983         return 0;
984 }
985
986 static int
987 qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
988                       int regnum, u16 data)
989 {
990         struct sk_buff *write_skb, *clear_skb, *read_skb;
991         struct qca8k_mgmt_eth_data *mgmt_eth_data;
992         u32 write_val, clear_val = 0, val;
993         struct net_device *mgmt_master;
994         int ret, ret1;
995         bool ack;
996
997         if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
998                 return -EINVAL;
999
1000         mgmt_eth_data = &priv->mgmt_eth_data;
1001
1002         write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1003                     QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1004                     QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1005
1006         if (read) {
1007                 write_val |= QCA8K_MDIO_MASTER_READ;
1008         } else {
1009                 write_val |= QCA8K_MDIO_MASTER_WRITE;
1010                 write_val |= QCA8K_MDIO_MASTER_DATA(data);
1011         }
1012
1013         /* Prealloc all the needed skb before the lock */
1014         write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
1015                                             QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
1016         if (!write_skb)
1017                 return -ENOMEM;
1018
1019         clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1020                                             QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
1021         if (!clear_skb) {
1022                 ret = -ENOMEM;
1023                 goto err_clear_skb;
1024         }
1025
1026         read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
1027                                            QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
1028         if (!read_skb) {
1029                 ret = -ENOMEM;
1030                 goto err_read_skb;
1031         }
1032
1033         /* Actually start the request:
1034          * 1. Send mdio master packet
1035          * 2. Busy-wait for the mdio master command
1036          * 3. Get the data if we are reading
1037          * 4. Reset the mdio master (even on error)
1038          */
1039         mutex_lock(&mgmt_eth_data->mutex);
1040
1041         /* Check if mgmt_master is operational */
1042         mgmt_master = priv->mgmt_master;
1043         if (!mgmt_master) {
1044                 mutex_unlock(&mgmt_eth_data->mutex);
1045                 ret = -EINVAL;
1046                 goto err_mgmt_master;
1047         }
1048
1049         read_skb->dev = mgmt_master;
1050         clear_skb->dev = mgmt_master;
1051         write_skb->dev = mgmt_master;
1052
1053         reinit_completion(&mgmt_eth_data->rw_done);
1054
1055         /* Increment seq_num and set it in the write pkt */
1056         mgmt_eth_data->seq++;
1057         qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
1058         mgmt_eth_data->ack = false;
1059
1060         dev_queue_xmit(write_skb);
1061
1062         ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1063                                           QCA8K_ETHERNET_TIMEOUT);
1064
1065         ack = mgmt_eth_data->ack;
1066
1067         if (ret <= 0) {
1068                 ret = -ETIMEDOUT;
1069                 kfree_skb(read_skb);
1070                 goto exit;
1071         }
1072
1073         if (!ack) {
1074                 ret = -EINVAL;
1075                 kfree_skb(read_skb);
1076                 goto exit;
1077         }
1078
1079         ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
1080                                 !(val & QCA8K_MDIO_MASTER_BUSY), 0,
1081                                 QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1082                                 mgmt_eth_data, read_skb, &val);
1083
1084         if (ret < 0 && ret1 < 0) {
1085                 ret = ret1;
1086                 goto exit;
1087         }
1088
1089         if (read) {
1090                 reinit_completion(&mgmt_eth_data->rw_done);
1091
1092                 /* Increment seq_num and set it in the read pkt */
1093                 mgmt_eth_data->seq++;
1094                 qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
1095                 mgmt_eth_data->ack = false;
1096
1097                 dev_queue_xmit(read_skb);
1098
1099                 ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1100                                                   QCA8K_ETHERNET_TIMEOUT);
1101
1102                 ack = mgmt_eth_data->ack;
1103
1104                 if (ret <= 0) {
1105                         ret = -ETIMEDOUT;
1106                         goto exit;
1107                 }
1108
1109                 if (!ack) {
1110                         ret = -EINVAL;
1111                         goto exit;
1112                 }
1113
1114                 ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
1115         } else {
1116                 kfree_skb(read_skb);
1117         }
1118 exit:
1119         reinit_completion(&mgmt_eth_data->rw_done);
1120
1121         /* Increment seq_num and set it in the clear pkt */
1122         mgmt_eth_data->seq++;
1123         qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
1124         mgmt_eth_data->ack = false;
1125
1126         dev_queue_xmit(clear_skb);
1127
1128         wait_for_completion_timeout(&mgmt_eth_data->rw_done,
1129                                     QCA8K_ETHERNET_TIMEOUT);
1130
1131         mutex_unlock(&mgmt_eth_data->mutex);
1132
1133         return ret;
1134
1135         /* Error handling before lock */
1136 err_mgmt_master:
1137         kfree_skb(read_skb);
1138 err_read_skb:
1139         kfree_skb(clear_skb);
1140 err_clear_skb:
1141         kfree_skb(write_skb);
1142
1143         return ret;
1144 }
1145
1146 static u32
1147 qca8k_port_to_phy(int port)
1148 {
1149         /* From Andrew Lunn:
1150          * Port 0 has no internal phy.
1151          * Port 1 has an internal PHY at MDIO address 0.
1152          * Port 2 has an internal PHY at MDIO address 1.
1153          * ...
1154          * Port 5 has an internal PHY at MDIO address 4.
1155          * Port 6 has no internal PHY.
1156          */
1157
1158         return port - 1;
1159 }
1160
1161 static int
1162 qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
1163 {
1164         u16 r1, r2, page;
1165         u32 val;
1166         int ret, ret1;
1167
1168         qca8k_split_addr(reg, &r1, &r2, &page);
1169
1170         ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
1171                                 QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
1172                                 bus, 0x10 | r2, r1, &val);
1173
1174         /* Check if qca8k_read has failed for a different reason
1175          * before returning -ETIMEDOUT
1176          */
1177         if (ret < 0 && ret1 < 0)
1178                 return ret1;
1179
1180         return ret;
1181 }
1182
1183 static int
1184 qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
1185 {
1186         struct mii_bus *bus = priv->bus;
1187         u16 r1, r2, page;
1188         u32 val;
1189         int ret;
1190
1191         if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1192                 return -EINVAL;
1193
1194         val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1195               QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1196               QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
1197               QCA8K_MDIO_MASTER_DATA(data);
1198
1199         qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1200
1201         mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1202
1203         ret = qca8k_set_page(priv, page);
1204         if (ret)
1205                 goto exit;
1206
1207         qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1208
1209         ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1210                                    QCA8K_MDIO_MASTER_BUSY);
1211
1212 exit:
1213         /* Even if the busy_wait times out, try to clear the MASTER_EN */
1214         qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1215
1216         mutex_unlock(&bus->mdio_lock);
1217
1218         return ret;
1219 }
1220
1221 static int
1222 qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
1223 {
1224         struct mii_bus *bus = priv->bus;
1225         u16 r1, r2, page;
1226         u32 val;
1227         int ret;
1228
1229         if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
1230                 return -EINVAL;
1231
1232         val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
1233               QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
1234               QCA8K_MDIO_MASTER_REG_ADDR(regnum);
1235
1236         qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
1237
1238         mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
1239
1240         ret = qca8k_set_page(priv, page);
1241         if (ret)
1242                 goto exit;
1243
1244         qca8k_mii_write32(priv, 0x10 | r2, r1, val);
1245
1246         ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
1247                                    QCA8K_MDIO_MASTER_BUSY);
1248         if (ret)
1249                 goto exit;
1250
1251         ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
1252
1253 exit:
1254         /* Even if the busy_wait times out, try to clear the MASTER_EN */
1255         qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
1256
1257         mutex_unlock(&bus->mdio_lock);
1258
1259         if (ret >= 0)
1260                 ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
1261
1262         return ret;
1263 }
1264
1265 static int
1266 qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
1267 {
1268         struct qca8k_priv *priv = slave_bus->priv;
1269         int ret;
1270
1271         /* Use mdio Ethernet when available, fall back to the legacy one on error */
1272         ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
1273         if (!ret)
1274                 return 0;
1275
1276         return qca8k_mdio_write(priv, phy, regnum, data);
1277 }
1278
1279 static int
1280 qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
1281 {
1282         struct qca8k_priv *priv = slave_bus->priv;
1283         int ret;
1284
1285         /* Use mdio Ethernet when available, fall back to the legacy one on error */
1286         ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
1287         if (ret >= 0)
1288                 return ret;
1289
1290         ret = qca8k_mdio_read(priv, phy, regnum);
1291
1292         if (ret < 0)
1293                 return 0xffff;
1294
1295         return ret;
1296 }
1297
1298 static int
1299 qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
1300 {
1301         port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
1302
1303         return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
1304 }
1305
1306 static int
1307 qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
1308 {
1309         port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
1310
1311         return qca8k_internal_mdio_read(slave_bus, port, regnum);
1312 }
1313
1314 static int
1315 qca8k_mdio_register(struct qca8k_priv *priv)
1316 {
1317         struct dsa_switch *ds = priv->ds;
1318         struct device_node *mdio;
1319         struct mii_bus *bus;
1320
1321         bus = devm_mdiobus_alloc(ds->dev);
1322         if (!bus)
1323                 return -ENOMEM;
1324
1325         bus->priv = (void *)priv;
1326         snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
1327                  ds->dst->index, ds->index);
1328         bus->parent = ds->dev;
1329         bus->phy_mask = ~ds->phys_mii_mask;
1330         ds->slave_mii_bus = bus;
1331
1332         /* Check if the devicetree declares the port:phy mapping */
1333         mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
1334         if (of_device_is_available(mdio)) {
1335                 bus->name = "qca8k slave mii";
1336                 bus->read = qca8k_internal_mdio_read;
1337                 bus->write = qca8k_internal_mdio_write;
1338                 return devm_of_mdiobus_register(priv->dev, bus, mdio);
1339         }
1340
1341         /* If a mapping can't be found, the legacy mapping is used,
1342          * based on the qca8k_port_to_phy function
1343          */
1344         bus->name = "qca8k-legacy slave mii";
1345         bus->read = qca8k_legacy_mdio_read;
1346         bus->write = qca8k_legacy_mdio_write;
1347         return devm_mdiobus_register(priv->dev, bus);
1348 }
1349
1350 static int
1351 qca8k_setup_mdio_bus(struct qca8k_priv *priv)
1352 {
1353         u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
1354         struct device_node *ports, *port;
1355         phy_interface_t mode;
1356         int err;
1357
1358         ports = of_get_child_by_name(priv->dev->of_node, "ports");
1359         if (!ports)
1360                 ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
1361
1362         if (!ports)
1363                 return -EINVAL;
1364
1365         for_each_available_child_of_node(ports, port) {
1366                 err = of_property_read_u32(port, "reg", &reg);
1367                 if (err) {
1368                         of_node_put(port);
1369                         of_node_put(ports);
1370                         return err;
1371                 }
1372
1373                 if (!dsa_is_user_port(priv->ds, reg))
1374                         continue;
1375
1376                 of_get_phy_mode(port, &mode);
1377
1378                 if (of_property_read_bool(port, "phy-handle") &&
1379                     mode != PHY_INTERFACE_MODE_INTERNAL)
1380                         external_mdio_mask |= BIT(reg);
1381                 else
1382                         internal_mdio_mask |= BIT(reg);
1383         }
1384
1385         of_node_put(ports);
1386         if (!external_mdio_mask && !internal_mdio_mask) {
1387                 dev_err(priv->dev, "no PHYs are defined.\n");
1388                 return -EINVAL;
1389         }
1390
1391         /* The QCA8K_MDIO_MASTER_EN bit, which grants access to PHYs through
1392          * the MDIO_MASTER register, also _disconnects_ the external MDC
1393          * passthrough to the internal PHYs. It's not possible to use both
1394          * configurations at the same time!
1395          *
1396          * Because this came up during the review process:
1397          * If the external mdio-bus driver is capable of magically disabling
1398          * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
1399          * accessors for the time being, it would be possible to pull this
1400          * off.
1401          */
1402         if (!!external_mdio_mask && !!internal_mdio_mask) {
1403                 dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
1404                 return -EINVAL;
1405         }
1406
1407         if (external_mdio_mask) {
1408                 /* Make sure to disable the internal mdio bus in case
1409                  * a dt-overlay and driver reload changed the configuration
1410                  */
1411
1412                 return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
1413                                          QCA8K_MDIO_MASTER_EN);
1414         }
1415
1416         return qca8k_mdio_register(priv);
1417 }
1418
1419 static int
1420 qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
1421 {
1422         u32 mask = 0;
1423         int ret = 0;
1424
1425         /* SoC specific settings for ipq8064.
1426          * If more devices require this, consider adding
1427          * a dedicated binding.
1428          */
1429         if (of_machine_is_compatible("qcom,ipq8064"))
1430                 mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
1431
1432         /* SoC specific settings for ipq8065 */
1433         if (of_machine_is_compatible("qcom,ipq8065"))
1434                 mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
1435
1436         if (mask) {
1437                 ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
1438                                 QCA8K_MAC_PWR_RGMII0_1_8V |
1439                                 QCA8K_MAC_PWR_RGMII1_1_8V,
1440                                 mask);
1441         }
1442
1443         return ret;
1444 }
1445
1446 static int qca8k_find_cpu_port(struct dsa_switch *ds)
1447 {
1448         struct qca8k_priv *priv = ds->priv;
1449
1450         /* Find the connected cpu port. Valid ports are 0 or 6 */
1451         if (dsa_is_cpu_port(ds, 0))
1452                 return 0;
1453
1454         dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
1455
1456         if (dsa_is_cpu_port(ds, 6))
1457                 return 6;
1458
1459         return -EINVAL;
1460 }
1461
1462 static int
1463 qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
1464 {
1465         struct device_node *node = priv->dev->of_node;
1466         const struct qca8k_match_data *data;
1467         u32 val = 0;
1468         int ret;
1469
1470         /* The QCA8327 requires the correct package mode to be set.
1471          * Its bigger brother, the QCA8328, has the 172 pin layout.
1472          * This should be applied by default, but we set it just to make sure.
1473          */
1474         if (priv->switch_id == QCA8K_ID_QCA8327) {
1475                 data = of_device_get_match_data(priv->dev);
1476
1477                 /* Set the correct package of 148 pin for QCA8327 */
1478                 if (data->reduced_package)
1479                         val |= QCA8327_PWS_PACKAGE148_EN;
1480
1481                 ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
1482                                 val);
1483                 if (ret)
1484                         return ret;
1485         }
1486
1487         if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
1488                 val |= QCA8K_PWS_POWER_ON_SEL;
1489
1490         if (of_property_read_bool(node, "qca,led-open-drain")) {
1491                 if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
1492                         dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
1493                         return -EINVAL;
1494                 }
1495
1496                 val |= QCA8K_PWS_LED_OPEN_EN_CSR;
1497         }
1498
1499         return qca8k_rmw(priv, QCA8K_REG_PWS,
1500                         QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
1501                         val);
1502 }
1503
1504 static int
1505 qca8k_parse_port_config(struct qca8k_priv *priv)
1506 {
1507         int port, cpu_port_index = -1, ret;
1508         struct device_node *port_dn;
1509         phy_interface_t mode;
1510         struct dsa_port *dp;
1511         u32 delay;
1512
1513         /* We have 2 CPU ports. Check them */
1514         for (port = 0; port < QCA8K_NUM_PORTS; port++) {
1515                 /* Skip all non-CPU ports */
1516                 if (port != 0 && port != 6)
1517                         continue;
1518
1519                 dp = dsa_to_port(priv->ds, port);
1520                 port_dn = dp->dn;
1521                 cpu_port_index++;
1522
1523                 if (!of_device_is_available(port_dn))
1524                         continue;
1525
1526                 ret = of_get_phy_mode(port_dn, &mode);
1527                 if (ret)
1528                         continue;
1529
1530                 switch (mode) {
1531                 case PHY_INTERFACE_MODE_RGMII:
1532                 case PHY_INTERFACE_MODE_RGMII_ID:
1533                 case PHY_INTERFACE_MODE_RGMII_TXID:
1534                 case PHY_INTERFACE_MODE_RGMII_RXID:
1535                 case PHY_INTERFACE_MODE_SGMII:
1536                         delay = 0;
1537
1538                         if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
1539                                 /* Switch regs accept value in ns, convert ps to ns */
1540                                 delay = delay / 1000;
1541                         else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1542                                  mode == PHY_INTERFACE_MODE_RGMII_TXID)
1543                                 delay = 1;
1544
1545                         if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
1546                                 dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
1547                                 delay = 3;
1548                         }
1549
1550                         priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
1551
1552                         delay = 0;
1553
1554                         if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
1555                                 /* Switch regs accept value in ns, convert ps to ns */
1556                                 delay = delay / 1000;
1557                         else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
1558                                  mode == PHY_INTERFACE_MODE_RGMII_RXID)
1559                                 delay = 2;
1560
1561                         if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
1562                                 dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
1563                                 delay = 3;
1564                         }
1565
1566                         priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
1567
1568                         /* Skip sgmii parsing for rgmii* mode */
1569                         if (mode == PHY_INTERFACE_MODE_RGMII ||
1570                             mode == PHY_INTERFACE_MODE_RGMII_ID ||
1571                             mode == PHY_INTERFACE_MODE_RGMII_TXID ||
1572                             mode == PHY_INTERFACE_MODE_RGMII_RXID)
1573                                 break;
1574
1575                         if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
1576                                 priv->ports_config.sgmii_tx_clk_falling_edge = true;
1577
1578                         if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
1579                                 priv->ports_config.sgmii_rx_clk_falling_edge = true;
1580
1581                         if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
1582                                 priv->ports_config.sgmii_enable_pll = true;
1583
1584                                 if (priv->switch_id == QCA8K_ID_QCA8327) {
1585                                         dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
1586                                         priv->ports_config.sgmii_enable_pll = false;
1587                                 }
1588
1589                                 if (priv->switch_revision < 2)
1590                                         dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
1591                         }
1592
1593                         break;
1594                 default:
1595                         continue;
1596                 }
1597         }
1598
1599         return 0;
1600 }
1601
1602 static void
1603 qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
1604                                       u32 reg)
1605 {
1606         u32 delay, val = 0;
1607         int ret;
1608
1609         /* The delay can be declared in 3 different ways:
1610          * mode set to rgmii with the standard internal-delay bindings,
1611          * mode set to rgmii-id, or the rgmii-txid/rxid phy modes.
1612          * The parse logic sets a delay different than 0 only when one of
1613          * the 3 ways is used. In every other case the delay is not
1614          * enabled. With ID or TX/RXID the delay is enabled and set to the
1615          * default and recommended value.
1616          */
1617         if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
1618                 delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
1619
1620                 val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
1621                         QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
1622         }
1623
1624         if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
1625                 delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
1626
1627                 val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
1628                         QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
1629         }
1630
1631         /* Set RGMII delay based on the selected values */
1632         ret = qca8k_rmw(priv, reg,
1633                         QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
1634                         QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
1635                         QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
1636                         QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
1637                         val);
1638         if (ret)
1639                 dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
1640                         cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
1641 }
1642
1643 static struct phylink_pcs *
1644 qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
1645                              phy_interface_t interface)
1646 {
1647         struct qca8k_priv *priv = ds->priv;
1648         struct phylink_pcs *pcs = NULL;
1649
1650         switch (interface) {
1651         case PHY_INTERFACE_MODE_SGMII:
1652         case PHY_INTERFACE_MODE_1000BASEX:
1653                 switch (port) {
1654                 case 0:
1655                         pcs = &priv->pcs_port_0.pcs;
1656                         break;
1657
1658                 case 6:
1659                         pcs = &priv->pcs_port_6.pcs;
1660                         break;
1661                 }
1662                 break;
1663
1664         default:
1665                 break;
1666         }
1667
1668         return pcs;
1669 }
1670
1671 static void
1672 qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
1673                          const struct phylink_link_state *state)
1674 {
1675         struct qca8k_priv *priv = ds->priv;
1676         int cpu_port_index;
1677         u32 reg;
1678
1679         switch (port) {
1680         case 0: /* 1st CPU port */
1681                 if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1682                     state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1683                     state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1684                     state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1685                     state->interface != PHY_INTERFACE_MODE_SGMII)
1686                         return;
1687
1688                 reg = QCA8K_REG_PORT0_PAD_CTRL;
1689                 cpu_port_index = QCA8K_CPU_PORT0;
1690                 break;
1691         case 1:
1692         case 2:
1693         case 3:
1694         case 4:
1695         case 5:
1696                 /* Internal PHY, nothing to do */
1697                 return;
1698         case 6: /* 2nd CPU port / external PHY */
1699                 if (state->interface != PHY_INTERFACE_MODE_RGMII &&
1700                     state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
1701                     state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
1702                     state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
1703                     state->interface != PHY_INTERFACE_MODE_SGMII &&
1704                     state->interface != PHY_INTERFACE_MODE_1000BASEX)
1705                         return;
1706
1707                 reg = QCA8K_REG_PORT6_PAD_CTRL;
1708                 cpu_port_index = QCA8K_CPU_PORT6;
1709                 break;
1710         default:
1711                 dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
1712                 return;
1713         }
1714
1715         if (port != 6 && phylink_autoneg_inband(mode)) {
1716                 dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
1717                         __func__);
1718                 return;
1719         }
1720
1721         switch (state->interface) {
1722         case PHY_INTERFACE_MODE_RGMII:
1723         case PHY_INTERFACE_MODE_RGMII_ID:
1724         case PHY_INTERFACE_MODE_RGMII_TXID:
1725         case PHY_INTERFACE_MODE_RGMII_RXID:
1726                 qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
1727
1728                 /* Configure rgmii delay */
1729                 qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1730
1731                 /* QCA8337 requires the rgmii rx delay to be set for all
1732                  * ports. This is enabled through PORT5_PAD_CTRL for all
1733                  * ports, rather than through individual port registers.
1734                  */
1735                 if (priv->switch_id == QCA8K_ID_QCA8337)
1736                         qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
1737                                     QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
1738                 break;
1739         case PHY_INTERFACE_MODE_SGMII:
1740         case PHY_INTERFACE_MODE_1000BASEX:
1741                 /* Enable SGMII on the port */
1742                 qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
1743                 break;
1744         default:
1745                 dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
1746                         phy_modes(state->interface), port);
1747                 return;
1748         }
1749 }
1750
1751 static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
1752                                    struct phylink_config *config)
1753 {
1754         switch (port) {
1755         case 0: /* 1st CPU port */
1756                 phy_interface_set_rgmii(config->supported_interfaces);
1757                 __set_bit(PHY_INTERFACE_MODE_SGMII,
1758                           config->supported_interfaces);
1759                 break;
1760
1761         case 1:
1762         case 2:
1763         case 3:
1764         case 4:
1765         case 5:
1766                 /* Internal PHY */
1767                 __set_bit(PHY_INTERFACE_MODE_GMII,
1768                           config->supported_interfaces);
1769                 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
1770                           config->supported_interfaces);
1771                 break;
1772
1773         case 6: /* 2nd CPU port / external PHY */
1774                 phy_interface_set_rgmii(config->supported_interfaces);
1775                 __set_bit(PHY_INTERFACE_MODE_SGMII,
1776                           config->supported_interfaces);
1777                 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
1778                           config->supported_interfaces);
1779                 break;
1780         }
1781
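        /* All ports support 10/100 half/full duplex and 1000 Mbps full duplex
         * only, plus symmetric and asymmetric pause.
         */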
1782         config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1783                 MAC_10 | MAC_100 | MAC_1000FD;
1784
1785         config->legacy_pre_march2020 = false;
1786 }
1787
1788 static void
1789 qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
1790                             phy_interface_t interface)
1791 {
1792         struct qca8k_priv *priv = ds->priv;
1793
1794         qca8k_port_set_status(priv, port, 0);
1795 }
1796
1797 static void
1798 qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
1799                           phy_interface_t interface, struct phy_device *phydev,
1800                           int speed, int duplex, bool tx_pause, bool rx_pause)
1801 {
1802         struct qca8k_priv *priv = ds->priv;
1803         u32 reg;
1804
1805         if (phylink_autoneg_inband(mode)) {
1806                 reg = QCA8K_PORT_STATUS_LINK_AUTO;
1807         } else {
1808                 switch (speed) {
1809                 case SPEED_10:
1810                         reg = QCA8K_PORT_STATUS_SPEED_10;
1811                         break;
1812                 case SPEED_100:
1813                         reg = QCA8K_PORT_STATUS_SPEED_100;
1814                         break;
1815                 case SPEED_1000:
1816                         reg = QCA8K_PORT_STATUS_SPEED_1000;
1817                         break;
1818                 default:
1819                         reg = QCA8K_PORT_STATUS_LINK_AUTO;
1820                         break;
1821                 }
1822
1823                 if (duplex == DUPLEX_FULL)
1824                         reg |= QCA8K_PORT_STATUS_DUPLEX;
1825
1826                 if (rx_pause || dsa_is_cpu_port(ds, port))
1827                         reg |= QCA8K_PORT_STATUS_RXFLOW;
1828
1829                 if (tx_pause || dsa_is_cpu_port(ds, port))
1830                         reg |= QCA8K_PORT_STATUS_TXFLOW;
1831         }
1832
1833         reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1834
1835         qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
1836 }
1837
1838 static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
1839 {
1840         return container_of(pcs, struct qca8k_pcs, pcs);
1841 }
1842
1843 static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
1844                                 struct phylink_link_state *state)
1845 {
1846         struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1847         int port = pcs_to_qca8k_pcs(pcs)->port;
1848         u32 reg;
1849         int ret;
1850
1851         ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
1852         if (ret < 0) {
1853                 state->link = false;
1854                 return;
1855         }
1856
1857         state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
1858         state->an_complete = state->link;
1859         state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
1860         state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
1861                                                            DUPLEX_HALF;
1862
1863         switch (reg & QCA8K_PORT_STATUS_SPEED) {
1864         case QCA8K_PORT_STATUS_SPEED_10:
1865                 state->speed = SPEED_10;
1866                 break;
1867         case QCA8K_PORT_STATUS_SPEED_100:
1868                 state->speed = SPEED_100;
1869                 break;
1870         case QCA8K_PORT_STATUS_SPEED_1000:
1871                 state->speed = SPEED_1000;
1872                 break;
1873         default:
1874                 state->speed = SPEED_UNKNOWN;
1875                 break;
1876         }
1877
1878         if (reg & QCA8K_PORT_STATUS_RXFLOW)
1879                 state->pause |= MLO_PAUSE_RX;
1880         if (reg & QCA8K_PORT_STATUS_TXFLOW)
1881                 state->pause |= MLO_PAUSE_TX;
1882 }
1883
1884 static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
1885                             phy_interface_t interface,
1886                             const unsigned long *advertising,
1887                             bool permit_pause_to_mac)
1888 {
1889         struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
1890         int cpu_port_index, ret, port;
1891         u32 reg, val;
1892
1893         port = pcs_to_qca8k_pcs(pcs)->port;
1894         switch (port) {
1895         case 0:
1896                 reg = QCA8K_REG_PORT0_PAD_CTRL;
1897                 cpu_port_index = QCA8K_CPU_PORT0;
1898                 break;
1899
1900         case 6:
1901                 reg = QCA8K_REG_PORT6_PAD_CTRL;
1902                 cpu_port_index = QCA8K_CPU_PORT6;
1903                 break;
1904
1905         default:
1906                 WARN_ON(1);
1907                 return -EINVAL;
1908         }
1909
1910         /* Enable/disable SerDes auto-negotiation as necessary */
1911         ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
1912         if (ret)
1913                 return ret;
1914         if (phylink_autoneg_inband(mode))
1915                 val &= ~QCA8K_PWS_SERDES_AEN_DIS;
1916         else
1917                 val |= QCA8K_PWS_SERDES_AEN_DIS;
1918         qca8k_write(priv, QCA8K_REG_PWS, val);
1919
1920         /* Configure the SGMII parameters */
1921         ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
1922         if (ret)
1923                 return ret;
1924
1925         val |= QCA8K_SGMII_EN_SD;
1926
1927         if (priv->ports_config.sgmii_enable_pll)
1928                 val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
1929                        QCA8K_SGMII_EN_TX;
1930
1931         if (dsa_is_cpu_port(priv->ds, port)) {
1932                 /* CPU port, we're talking to the CPU MAC, be a PHY */
1933                 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1934                 val |= QCA8K_SGMII_MODE_CTRL_PHY;
1935         } else if (interface == PHY_INTERFACE_MODE_SGMII) {
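                /* Plain SGMII: the SerDes runs in MAC mode, typically facing
                 * an external PHY.
                 */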
1936                 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1937                 val |= QCA8K_SGMII_MODE_CTRL_MAC;
1938         } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
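                /* 1000BASE-X mode, e.g. for a direct SerDes/fiber link */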
1939                 val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
1940                 val |= QCA8K_SGMII_MODE_CTRL_BASEX;
1941         }
1942
1943         qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
1944
1945         /* The original code reports port instability unless SGMII also has
1946          * a delay set. Apply the advised values here or take them from DT.
1947          */
1948         if (interface == PHY_INTERFACE_MODE_SGMII)
1949                 qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
1950         /* For qca8327/qca8328/qca8334/qca8338 the sgmii line is unique and
1951          * the falling edge is configured by writing the PORT0 PAD reg
1952          */
1953         if (priv->switch_id == QCA8K_ID_QCA8327 ||
1954             priv->switch_id == QCA8K_ID_QCA8337)
1955                 reg = QCA8K_REG_PORT0_PAD_CTRL;
1956
1957         val = 0;
1958
1959         /* SGMII Clock phase configuration */
1960         if (priv->ports_config.sgmii_rx_clk_falling_edge)
1961                 val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
1962
1963         if (priv->ports_config.sgmii_tx_clk_falling_edge)
1964                 val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
1965
1966         if (val)
1967                 ret = qca8k_rmw(priv, reg,
1968                                 QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
1969                                 QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
1970                                 val);
1971
1972         return 0;
1973 }
1974
1975 static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
1976 {
1977 }
1978
1979 static const struct phylink_pcs_ops qca8k_pcs_ops = {
1980         .pcs_get_state = qca8k_pcs_get_state,
1981         .pcs_config = qca8k_pcs_config,
1982         .pcs_an_restart = qca8k_pcs_an_restart,
1983 };
1984
1985 static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
1986                             int port)
1987 {
1988         qpcs->pcs.ops = &qca8k_pcs_ops;
1989
1990         /* We don't have interrupts for link changes, so we need to poll */
1991         qpcs->pcs.poll = true;
1992         qpcs->priv = priv;
1993         qpcs->port = port;
1994 }
1995
1996 static void
1997 qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
1998 {
1999         const struct qca8k_match_data *match_data;
2000         struct qca8k_priv *priv = ds->priv;
2001         int i;
2002
2003         if (stringset != ETH_SS_STATS)
2004                 return;
2005
2006         match_data = of_device_get_match_data(priv->dev);
2007
2008         for (i = 0; i < match_data->mib_count; i++)
2009                 strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
2010                         ETH_GSTRING_LEN);
2011 }
2012
2013 static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
2014 {
2015         const struct qca8k_match_data *match_data;
2016         struct qca8k_mib_eth_data *mib_eth_data;
2017         struct qca8k_priv *priv = ds->priv;
2018         const struct qca8k_mib_desc *mib;
2019         struct mib_ethhdr *mib_ethhdr;
2020         int i, mib_len, offset = 0;
2021         u64 *data;
2022         u8 port;
2023
2024         mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
2025         mib_eth_data = &priv->mib_eth_data;
2026
2027         /* The switch autocasts MIB data for every port. Ignore packets for
2028          * other ports and parse only the requested one.
2029          */
2030         port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
2031         if (port != mib_eth_data->req_port)
2032                 goto exit;
2033
2034         match_data = device_get_match_data(priv->dev);
2035         data = mib_eth_data->data;
2036
2037         for (i = 0; i < match_data->mib_count; i++) {
2038                 mib = &ar8327_mib[i];
2039
2040                 /* The first 3 mib counters are present in the skb head */
2041                 if (i < 3) {
2042                         data[i] = mib_ethhdr->data[i];
2043                         continue;
2044                 }
2045
2046                 mib_len = sizeof(uint32_t);
2047
2048                 /* Some mib counters are 64 bits wide */
2049                 if (mib->size == 2)
2050                         mib_len = sizeof(uint64_t);
2051
2052                 /* Copy the mib value from the packet to the data array */
2053                 memcpy(data + i, skb->data + offset, mib_len);
2054
2055                 /* Set the offset for the next mib */
2056                 offset += mib_len;
2057         }
2058
2059 exit:
2060         /* Signal completion once the mib packet for every port has been parsed */
2061         if (refcount_dec_and_test(&mib_eth_data->port_parsed))
2062                 complete(&mib_eth_data->rw_done);
2063 }
2064
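/* Gather the port's MIB counters over the Ethernet mgmt channel: request a
 * MIB autocast from the switch and wait for qca8k_mib_autocast_handler()
 * to copy the counters for the requested port into @data.
 */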
2065 static int
2066 qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
2067 {
2068         struct dsa_port *dp = dsa_to_port(ds, port);
2069         struct qca8k_mib_eth_data *mib_eth_data;
2070         struct qca8k_priv *priv = ds->priv;
2071         int ret;
2072
2073         mib_eth_data = &priv->mib_eth_data;
2074
2075         mutex_lock(&mib_eth_data->mutex);
2076
2077         reinit_completion(&mib_eth_data->rw_done);
2078
2079         mib_eth_data->req_port = dp->index;
2080         mib_eth_data->data = data;
2081         refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
2082
2083         mutex_lock(&priv->reg_mutex);
2084
2085         /* Send mib autocast request */
2086         ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
2087                                  QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
2088                                  FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
2089                                  QCA8K_MIB_BUSY);
2090
2091         mutex_unlock(&priv->reg_mutex);
2092
2093         if (ret)
2094                 goto exit;
2095
2096         ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
2097
2098 exit:
2099         mutex_unlock(&mib_eth_data->mutex);
2100
2101         return ret;
2102 }
2103
2104 static void
2105 qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
2106                         uint64_t *data)
2107 {
2108         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2109         const struct qca8k_match_data *match_data;
2110         const struct qca8k_mib_desc *mib;
2111         u32 reg, i, val;
2112         u32 hi = 0;
2113         int ret;
2114
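        /* Prefer the Ethernet mgmt path when a master is available and the
         * request succeeds; otherwise fall back to MDIO register reads below.
         */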
2115         if (priv->mgmt_master &&
2116             qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
2117                 return;
2118
2119         match_data = of_device_get_match_data(priv->dev);
2120
2121         for (i = 0; i < match_data->mib_count; i++) {
2122                 mib = &ar8327_mib[i];
2123                 reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
2124
2125                 ret = qca8k_read(priv, reg, &val);
2126                 if (ret < 0)
2127                         continue;
2128
2129                 if (mib->size == 2) {
2130                         ret = qca8k_read(priv, reg + 4, &hi);
2131                         if (ret < 0)
2132                                 continue;
2133                 }
2134
2135                 data[i] = val;
2136                 if (mib->size == 2)
2137                         data[i] |= (u64)hi << 32;
2138         }
2139 }
2140
2141 static int
2142 qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
2143 {
2144         const struct qca8k_match_data *match_data;
2145         struct qca8k_priv *priv = ds->priv;
2146
2147         if (sset != ETH_SS_STATS)
2148                 return 0;
2149
2150         match_data = of_device_get_match_data(priv->dev);
2151
2152         return match_data->mib_count;
2153 }
2154
2155 static int
2156 qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
2157 {
2158         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2159         u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
2160         u32 reg;
2161         int ret;
2162
2163         mutex_lock(&priv->reg_mutex);
2164         ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
2165         if (ret < 0)
2166                 goto exit;
2167
2168         if (eee->eee_enabled)
2169                 reg |= lpi_en;
2170         else
2171                 reg &= ~lpi_en;
2172         ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
2173
2174 exit:
2175         mutex_unlock(&priv->reg_mutex);
2176         return ret;
2177 }
2178
2179 static int
2180 qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
2181 {
2182         /* Nothing to do on the port's MAC */
2183         return 0;
2184 }
2185
2186 static void
2187 qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
2188 {
2189         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2190         u32 stp_state;
2191
2192         switch (state) {
2193         case BR_STATE_DISABLED:
2194                 stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
2195                 break;
2196         case BR_STATE_BLOCKING:
2197                 stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
2198                 break;
2199         case BR_STATE_LISTENING:
2200                 stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
2201                 break;
2202         case BR_STATE_LEARNING:
2203                 stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
2204                 break;
2205         case BR_STATE_FORWARDING:
2206         default:
2207                 stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
2208                 break;
2209         }
2210
2211         qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2212                   QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
2213 }
2214
2215 static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
2216                                   struct dsa_bridge bridge,
2217                                   bool *tx_fwd_offload,
2218                                   struct netlink_ext_ack *extack)
2219 {
2220         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2221         int port_mask, cpu_port;
2222         int i, ret;
2223
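        /* Start this port's member mask with just the CPU port; the other
         * bridge members are OR-ed in below.
         */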
2224         cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2225         port_mask = BIT(cpu_port);
2226
2227         for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2228                 if (dsa_is_cpu_port(ds, i))
2229                         continue;
2230                 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
2231                         continue;
2232                 /* Add this port to the portvlan mask of the other ports
2233                  * in the bridge
2234                  */
2235                 ret = regmap_set_bits(priv->regmap,
2236                                       QCA8K_PORT_LOOKUP_CTRL(i),
2237                                       BIT(port));
2238                 if (ret)
2239                         return ret;
2240                 if (i != port)
2241                         port_mask |= BIT(i);
2242         }
2243
2244         /* Add all other ports to this ports portvlan mask */
2245         ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2246                         QCA8K_PORT_LOOKUP_MEMBER, port_mask);
2247
2248         return ret;
2249 }
2250
2251 static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
2252                                     struct dsa_bridge bridge)
2253 {
2254         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2255         int cpu_port, i;
2256
2257         cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
2258
2259         for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2260                 if (dsa_is_cpu_port(ds, i))
2261                         continue;
2262                 if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
2263                         continue;
2264                 /* Remove this port from the portvlan mask of the other ports
2265                  * in the bridge
2266                  */
2267                 regmap_clear_bits(priv->regmap,
2268                                   QCA8K_PORT_LOOKUP_CTRL(i),
2269                                   BIT(port));
2270         }
2271
2272         /* Set the cpu port to be the only one in the portvlan mask of
2273          * this port
2274          */
2275         qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2276                   QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
2277 }
2278
2279 static void
2280 qca8k_port_fast_age(struct dsa_switch *ds, int port)
2281 {
2282         struct qca8k_priv *priv = ds->priv;
2283
2284         mutex_lock(&priv->reg_mutex);
2285         qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
2286         mutex_unlock(&priv->reg_mutex);
2287 }
2288
2289 static int
2290 qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
2291 {
2292         struct qca8k_priv *priv = ds->priv;
2293         unsigned int secs = msecs / 1000;
2294         u32 val;
2295
2296         /* The AGE_TIME reg is programmed in 7 s steps */
2297         val = secs / 7;
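        /* e.g. a requested ageing time of 300000 ms gives secs = 300 and
         * val = 42, for an effective hardware ageing time of 42 * 7 = 294 s
         */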
2298
2299         /* Round a result of 0 up to 1 so that a short ageing time does
2300          * NOT disable learning
2301          */
2302         if (!val)
2303                 val = 1;
2304
2305         return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
2306                                   QCA8K_ATU_AGE_TIME(val));
2307 }
2308
2309 static int
2310 qca8k_port_enable(struct dsa_switch *ds, int port,
2311                   struct phy_device *phy)
2312 {
2313         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2314
2315         qca8k_port_set_status(priv, port, 1);
2316         priv->port_enabled_map |= BIT(port);
2317
2318         if (dsa_is_user_port(ds, port))
2319                 phy_support_asym_pause(phy);
2320
2321         return 0;
2322 }
2323
2324 static void
2325 qca8k_port_disable(struct dsa_switch *ds, int port)
2326 {
2327         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2328
2329         qca8k_port_set_status(priv, port, 0);
2330         priv->port_enabled_map &= ~BIT(port);
2331 }
2332
2333 static int
2334 qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
2335 {
2336         struct qca8k_priv *priv = ds->priv;
2337         int ret;
2338
2339         /* We only have a global MTU setting.
2340          * DSA always sets the CPU port's MTU to the largest MTU of the
2341          * slave ports.
2342          * Setting the MTU just for the CPU port is therefore sufficient to
2343          * correctly set a value for every port.
2344          */
2345         if (!dsa_is_cpu_port(ds, port))
2346                 return 0;
2347
2348         /* To change the MAX_FRAME_SIZE the cpu ports must be off or
2349          * the switch panics.
2350          * Turn off both cpu ports before applying the new value to prevent
2351          * this.
2352          */
2353         if (priv->port_enabled_map & BIT(0))
2354                 qca8k_port_set_status(priv, 0, 0);
2355
2356         if (priv->port_enabled_map & BIT(6))
2357                 qca8k_port_set_status(priv, 6, 0);
2358
2359         /* Include L2 header / FCS length */
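        /* e.g. the standard 1500 byte MTU programs 1500 + 14 + 4 = 1518 */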
2360         ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN);
2361
2362         if (priv->port_enabled_map & BIT(0))
2363                 qca8k_port_set_status(priv, 0, 1);
2364
2365         if (priv->port_enabled_map & BIT(6))
2366                 qca8k_port_set_status(priv, 6, 1);
2367
2368         return ret;
2369 }
2370
2371 static int
2372 qca8k_port_max_mtu(struct dsa_switch *ds, int port)
2373 {
2374         return QCA8K_MAX_MTU;
2375 }
2376
2377 static int
2378 qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
2379                       u16 port_mask, u16 vid)
2380 {
2381         /* Set the vid to the port vlan id if no vid is set */
2382         if (!vid)
2383                 vid = QCA8K_PORT_VID_DEF;
2384
2385         return qca8k_fdb_add(priv, addr, port_mask, vid,
2386                              QCA8K_ATU_STATUS_STATIC);
2387 }
2388
2389 static int
2390 qca8k_port_fdb_add(struct dsa_switch *ds, int port,
2391                    const unsigned char *addr, u16 vid,
2392                    struct dsa_db db)
2393 {
2394         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2395         u16 port_mask = BIT(port);
2396
2397         return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
2398 }
2399
2400 static int
2401 qca8k_port_fdb_del(struct dsa_switch *ds, int port,
2402                    const unsigned char *addr, u16 vid,
2403                    struct dsa_db db)
2404 {
2405         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2406         u16 port_mask = BIT(port);
2407
2408         if (!vid)
2409                 vid = QCA8K_PORT_VID_DEF;
2410
2411         return qca8k_fdb_del(priv, addr, port_mask, vid);
2412 }
2413
2414 static int
2415 qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
2416                     dsa_fdb_dump_cb_t *cb, void *data)
2417 {
2418         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2419         struct qca8k_fdb _fdb = { 0 };
2420         int cnt = QCA8K_NUM_FDB_RECORDS;
2421         bool is_static;
2422         int ret = 0;
2423
2424         mutex_lock(&priv->reg_mutex);
2425         while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
2426                 if (!_fdb.aging)
2427                         break;
2428                 is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
2429                 ret = cb(_fdb.mac, _fdb.vid, is_static, data);
2430                 if (ret)
2431                         break;
2432         }
2433         mutex_unlock(&priv->reg_mutex);
2434
2435         return 0;
2436 }
2437
2438 static int
2439 qca8k_port_mdb_add(struct dsa_switch *ds, int port,
2440                    const struct switchdev_obj_port_mdb *mdb,
2441                    struct dsa_db db)
2442 {
2443         struct qca8k_priv *priv = ds->priv;
2444         const u8 *addr = mdb->addr;
2445         u16 vid = mdb->vid;
2446
2447         return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
2448 }
2449
2450 static int
2451 qca8k_port_mdb_del(struct dsa_switch *ds, int port,
2452                    const struct switchdev_obj_port_mdb *mdb,
2453                    struct dsa_db db)
2454 {
2455         struct qca8k_priv *priv = ds->priv;
2456         const u8 *addr = mdb->addr;
2457         u16 vid = mdb->vid;
2458
2459         return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
2460 }
2461
2462 static int
2463 qca8k_port_mirror_add(struct dsa_switch *ds, int port,
2464                       struct dsa_mall_mirror_tc_entry *mirror,
2465                       bool ingress, struct netlink_ext_ack *extack)
2466 {
2467         struct qca8k_priv *priv = ds->priv;
2468         int monitor_port, ret;
2469         u32 reg, val;
2470
2471         /* Check for an existing entry */
2472         if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
2473                 return -EEXIST;
2474
2475         ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
2476         if (ret)
2477                 return ret;
2478
2479         /* QCA83xx can have only one port set to mirror mode.
2480          * Check that the correct port is requested and return an error otherwise.
2481          * When no mirror port is set, the value reads as 0xF.
2482          */
2483         monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2484         if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
2485                 return -EEXIST;
2486
2487         /* Set the monitor port */
2488         val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
2489                          mirror->to_local_port);
2490         ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2491                                  QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2492         if (ret)
2493                 return ret;
2494
2495         if (ingress) {
2496                 reg = QCA8K_PORT_LOOKUP_CTRL(port);
2497                 val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2498         } else {
2499                 reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2500                 val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2501         }
2502
2503         ret = regmap_update_bits(priv->regmap, reg, val, val);
2504         if (ret)
2505                 return ret;
2506
2507         /* Track mirror port for tx and rx to decide when the
2508          * mirror port has to be disabled.
2509          */
2510         if (ingress)
2511                 priv->mirror_rx |= BIT(port);
2512         else
2513                 priv->mirror_tx |= BIT(port);
2514
2515         return 0;
2516 }
2517
2518 static void
2519 qca8k_port_mirror_del(struct dsa_switch *ds, int port,
2520                       struct dsa_mall_mirror_tc_entry *mirror)
2521 {
2522         struct qca8k_priv *priv = ds->priv;
2523         u32 reg, val;
2524         int ret;
2525
2526         if (mirror->ingress) {
2527                 reg = QCA8K_PORT_LOOKUP_CTRL(port);
2528                 val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2529         } else {
2530                 reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2531                 val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2532         }
2533
2534         ret = regmap_clear_bits(priv->regmap, reg, val);
2535         if (ret)
2536                 goto err;
2537
2538         if (mirror->ingress)
2539                 priv->mirror_rx &= ~BIT(port);
2540         else
2541                 priv->mirror_tx &= ~BIT(port);
2542
2543         /* No port set to send packet to mirror port. Disable mirror port */
2544         if (!priv->mirror_rx && !priv->mirror_tx) {
2545                 val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
2546                 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2547                                          QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2548                 if (ret)
2549                         goto err;
2550         }
	return;

2551 err:
2552         dev_err(priv->dev, "Failed to del mirror port from %d", port);
2553 }
2554
2555 static int
2556 qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
2557                           struct netlink_ext_ack *extack)
2558 {
2559         struct qca8k_priv *priv = ds->priv;
2560         int ret;
2561
2562         if (vlan_filtering) {
2563                 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2564                                 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2565                                 QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
2566         } else {
2567                 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2568                                 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2569                                 QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
2570         }
2571
2572         return ret;
2573 }
2574
2575 static int
2576 qca8k_port_vlan_add(struct dsa_switch *ds, int port,
2577                     const struct switchdev_obj_port_vlan *vlan,
2578                     struct netlink_ext_ack *extack)
2579 {
2580         bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
2581         bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
2582         struct qca8k_priv *priv = ds->priv;
2583         int ret;
2584
2585         ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
2586         if (ret) {
2587                 dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
2588                 return ret;
2589         }
2590
2591         if (pvid) {
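                /* Program the new PVID into the port's default VID registers */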
2592                 ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
2593                                 QCA8K_EGREES_VLAN_PORT_MASK(port),
2594                                 QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
2595                 if (ret)
2596                         return ret;
2597
2598                 ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
2599                                   QCA8K_PORT_VLAN_CVID(vlan->vid) |
2600                                   QCA8K_PORT_VLAN_SVID(vlan->vid));
2601         }
2602
2603         return ret;
2604 }
2605
2606 static int
2607 qca8k_port_vlan_del(struct dsa_switch *ds, int port,
2608                     const struct switchdev_obj_port_vlan *vlan)
2609 {
2610         struct qca8k_priv *priv = ds->priv;
2611         int ret;
2612
2613         ret = qca8k_vlan_del(priv, port, vlan->vid);
2614         if (ret)
2615                 dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
2616
2617         return ret;
2618 }
2619
2620 static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
2621 {
2622         struct qca8k_priv *priv = ds->priv;
2623
2624         /* Communicate the switch revision to the internal phy driver.
2625          * Based on the switch revision, different values need to be
2626          * written to the dbg and mmd regs of the phy.
2627          * The first 2 bits are used to pass the switch revision
2628          * to the phy driver.
2629          */
2630         if (port > 0 && port < 6)
2631                 return priv->switch_revision;
2632
2633         return 0;
2634 }
2635
2636 static enum dsa_tag_protocol
2637 qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
2638                        enum dsa_tag_protocol mp)
2639 {
2640         return DSA_TAG_PROTO_QCA;
2641 }
2642
2643 static bool
2644 qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
2645                       struct netdev_lag_upper_info *info)
2646 {
2647         struct dsa_port *dp;
2648         int members = 0;
2649
2650         if (!lag.id)
2651                 return false;
2652
2653         dsa_lag_foreach_port(dp, ds->dst, &lag)
2654                 /* Includes the port joining the LAG */
2655                 members++;
2656
2657         if (members > QCA8K_NUM_PORTS_FOR_LAG)
2658                 return false;
2659
2660         if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2661                 return false;
2662
2663         if (info->hash_type != NETDEV_LAG_HASH_L2 &&
2664             info->hash_type != NETDEV_LAG_HASH_L23)
2665                 return false;
2666
2667         return true;
2668 }
2669
2670 static int
2671 qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
2672                      struct netdev_lag_upper_info *info)
2673 {
2674         struct net_device *lag_dev = lag.dev;
2675         struct qca8k_priv *priv = ds->priv;
2676         bool unique_lag = true;
2677         unsigned int i;
2678         u32 hash = 0;
2679
2680         switch (info->hash_type) {
2681         case NETDEV_LAG_HASH_L23:
2682                 hash |= QCA8K_TRUNK_HASH_SIP_EN;
2683                 hash |= QCA8K_TRUNK_HASH_DIP_EN;
2684                 fallthrough;
2685         case NETDEV_LAG_HASH_L2:
2686                 hash |= QCA8K_TRUNK_HASH_SA_EN;
2687                 hash |= QCA8K_TRUNK_HASH_DA_EN;
2688                 break;
2689         default: /* We should NEVER reach this */
2690                 return -EOPNOTSUPP;
2691         }
2692
2693         /* Check if we are the unique configured LAG */
2694         dsa_lags_foreach_id(i, ds->dst)
2695                 if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
2696                         unique_lag = false;
2697                         break;
2698                 }
2699
2700         /* The hash mode is global. Make sure the same hash mode
2701          * is used for all of the 4 possible LAGs.
2702          * If we are the only configured LAG we can set whatever
2703          * hash mode we want.
2704          * To change the hash mode, all existing LAGs must be removed
2705          * and the mode set again with the newly created one.
2706          */
2707         if (unique_lag) {
2708                 priv->lag_hash_mode = hash;
2709         } else if (priv->lag_hash_mode != hash) {
2710                 netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
2711                 return -EOPNOTSUPP;
2712         }
2713
2714         return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
2715                                   QCA8K_TRUNK_HASH_MASK, hash);
2716 }
2717
2718 static int
2719 qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
2720                           struct dsa_lag lag, bool delete)
2721 {
2722         struct qca8k_priv *priv = ds->priv;
2723         int ret, id, i;
2724         u32 val;
2725
2726         /* DSA LAG IDs are one-based, hardware is zero-based */
2727         id = lag.id - 1;
2728
2729         /* Read current port member */
2730         ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
2731         if (ret)
2732                 return ret;
2733
2734         /* Shift val to the correct trunk */
2735         val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
2736         val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
2737         if (delete)
2738                 val &= ~BIT(port);
2739         else
2740                 val |= BIT(port);
2741
2742         /* Update port member. With empty portmap disable trunk */
2743         ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
2744                                  QCA8K_REG_GOL_TRUNK_MEMBER(id) |
2745                                  QCA8K_REG_GOL_TRUNK_EN(id),
2746                                  !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
2747                                  val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
2748
2749         /* Search for an empty member slot when adding, or for the port's slot when deleting */
2750         for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
2751                 ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
2752                 if (ret)
2753                         return ret;
2754
2755                 val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
2756                 val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
2757
2758                 if (delete) {
2759                         /* If the member is not flagged as enabled, this
2760                          * slot is empty; skip it
2761                          */
2762                         if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2763                                 continue;
2764
2765                         val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
2766                         if (val != port)
2767                                 continue;
2768                 } else {
2769                         /* If the member is already flagged as enabled,
2770                          * this slot is taken; skip it
2771                          */
2772                         if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
2773                                 continue;
2774                 }
2775
2776                 /* We have found the member to add/remove */
2777                 break;
2778         }
2779
2780         /* Set port in the correct port mask or disable port if in delete mode */
2781         return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
2782                                   QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
2783                                   QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
2784                                   !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
2785                                   port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
2786 }
2787
2788 static int
2789 qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
2790                     struct netdev_lag_upper_info *info)
2791 {
2792         int ret;
2793
2794         if (!qca8k_lag_can_offload(ds, lag, info))
2795                 return -EOPNOTSUPP;
2796
2797         ret = qca8k_lag_setup_hash(ds, lag, info);
2798         if (ret)
2799                 return ret;
2800
2801         return qca8k_lag_refresh_portmap(ds, port, lag, false);
2802 }
2803
2804 static int
2805 qca8k_port_lag_leave(struct dsa_switch *ds, int port,
2806                      struct dsa_lag lag)
2807 {
2808         return qca8k_lag_refresh_portmap(ds, port, lag, true);
2809 }
2810
2811 static void
2812 qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
2813                     bool operational)
2814 {
2815         struct dsa_port *dp = master->dsa_ptr;
2816         struct qca8k_priv *priv = ds->priv;
2817
2818         /* Ethernet MIB/MDIO is only supported for CPU port 0 */
2819         if (dp->index != 0)
2820                 return;
2821
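        /* Hold both the Ethernet mgmt and MIB mutexes so that in-flight
         * Ethernet transactions observe a consistent mgmt_master pointer.
         */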
2822         mutex_lock(&priv->mgmt_eth_data.mutex);
2823         mutex_lock(&priv->mib_eth_data.mutex);
2824
2825         priv->mgmt_master = operational ? (struct net_device *)master : NULL;
2826
2827         mutex_unlock(&priv->mib_eth_data.mutex);
2828         mutex_unlock(&priv->mgmt_eth_data.mutex);
2829 }
2830
2831 static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
2832                                       enum dsa_tag_protocol proto)
2833 {
2834         struct qca_tagger_data *tagger_data;
2835
2836         switch (proto) {
2837         case DSA_TAG_PROTO_QCA:
2838                 tagger_data = ds->tagger_data;
2839
2840                 tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
2841                 tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
2842
2843                 break;
2844         default:
2845                 return -EOPNOTSUPP;
2846         }
2847
2848         return 0;
2849 }
2850
2851 static int
2852 qca8k_setup(struct dsa_switch *ds)
2853 {
2854         struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2855         int cpu_port, ret, i;
2856         u32 mask;
2857
2858         cpu_port = qca8k_find_cpu_port(ds);
2859         if (cpu_port < 0) {
2860                 dev_err(priv->dev, "No cpu port configured on either port 0 or port 6");
2861                 return cpu_port;
2862         }
2863
2864         /* Parse the CPU port config to be used later in phylink mac_config */
2865         ret = qca8k_parse_port_config(priv);
2866         if (ret)
2867                 return ret;
2868
2869         ret = qca8k_setup_mdio_bus(priv);
2870         if (ret)
2871                 return ret;
2872
2873         ret = qca8k_setup_of_pws_reg(priv);
2874         if (ret)
2875                 return ret;
2876
2877         ret = qca8k_setup_mac_pwr_sel(priv);
2878         if (ret)
2879                 return ret;
2880
2881         qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
2882         qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
2883
2884         /* Make sure MAC06 is disabled */
2885         ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
2886                                 QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
2887         if (ret) {
2888                 dev_err(priv->dev, "failed disabling MAC06 exchange");
2889                 return ret;
2890         }
2891
2892         /* Enable CPU Port */
2893         ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2894                               QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
2895         if (ret) {
2896                 dev_err(priv->dev, "failed enabling CPU port");
2897                 return ret;
2898         }
2899
2900         /* Enable MIB counters */
2901         ret = qca8k_mib_init(priv);
2902         if (ret)
2903                 dev_warn(priv->dev, "mib init failed");
2904
2905         /* Initial setup of all ports */
2906         for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2907                 /* Disable forwarding by default on all ports */
2908                 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
2909                                 QCA8K_PORT_LOOKUP_MEMBER, 0);
2910                 if (ret)
2911                         return ret;
2912
2913                 /* Enable QCA header mode on all cpu ports */
2914                 if (dsa_is_cpu_port(ds, i)) {
2915                         ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
2916                                           FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
2917                                           FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
2918                         if (ret) {
2919                                 dev_err(priv->dev, "failed enabling QCA header mode");
2920                                 return ret;
2921                         }
2922                 }
2923
2924                 /* Disable MAC by default on all user ports */
2925                 if (dsa_is_user_port(ds, i))
2926                         qca8k_port_set_status(priv, i, 0);
2927         }
2928
2929         /* Forward all unknown frames to the CPU port for Linux processing.
2930          * Notice that in a multi-cpu config only one port should be set
2931          * as destination for igmp, unknown, multicast and broadcast packets.
2932          */
2933         ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
2934                           FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
2935                           FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
2936                           FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
2937                           FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
2938         if (ret)
2939                 return ret;
2940
2941         /* Setup connection between CPU port & user ports
2942          * Configure specific switch configuration for ports
2943          */
2944         for (i = 0; i < QCA8K_NUM_PORTS; i++) {
2945                 /* CPU port gets connected to all user ports of the switch */
2946                 if (dsa_is_cpu_port(ds, i)) {
2947                         ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
2948                                         QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
2949                         if (ret)
2950                                 return ret;
2951                 }
2952
2953                 /* Individual user ports get connected to CPU port only */
2954                 if (dsa_is_user_port(ds, i)) {
2955                         ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
2956                                         QCA8K_PORT_LOOKUP_MEMBER,
2957                                         BIT(cpu_port));
2958                         if (ret)
2959                                 return ret;
2960
2961                         /* Enable ARP Auto-learning by default */
2962                         ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
2963                                               QCA8K_PORT_LOOKUP_LEARN);
2964                         if (ret)
2965                                 return ret;
2966
2967                         /* For port based vlans to work we need to set the
2968                          * default egress vid
2969                          */
2970                         ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
2971                                         QCA8K_EGREES_VLAN_PORT_MASK(i),
2972                                         QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
2973                         if (ret)
2974                                 return ret;
2975
2976                         ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
2977                                           QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
2978                                           QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
2979                         if (ret)
2980                                 return ret;
2981                 }
2982
2983                 /* Port 5 of the qca8337 has some problems under flood conditions. The
2984                  * original legacy driver had some specific buffer and priority settings
2985                  * for the different ports, suggested by the QCA switch team. Add these
2986                  * missing settings to improve switch stability under load conditions.
2987                  * This problem is limited to the qca8337; other qca8k switches are not affected.
2988                  */
2989                 if (priv->switch_id == QCA8K_ID_QCA8337) {
2990                         switch (i) {
2991                         /* The 2 CPU ports and port 5 require different
2992                          * priorities than any other port.
2993                          */
2994                         case 0:
2995                         case 5:
2996                         case 6:
2997                                 mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
2998                                         QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
2999                                         QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
3000                                         QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
3001                                         QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
3002                                         QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
3003                                         QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
3004                                 break;
3005                         default:
3006                                 mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
3007                                         QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
3008                                         QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
3009                                         QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
3010                                         QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
3011                         }
3012                         qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
3013
3014                         mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
3015                                QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
3016                                QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
3017                                QCA8K_PORT_HOL_CTRL1_WRED_EN;
3018                         qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
3019                                   QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
3020                                   QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
3021                                   QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
3022                                   QCA8K_PORT_HOL_CTRL1_WRED_EN,
3023                                   mask);
3024                 }
3025         }
3026
3027         /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
3028         if (priv->switch_id == QCA8K_ID_QCA8327) {
3029                 mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
3030                        QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
3031                 qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
3032                           QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
3033                           QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
3034                           mask);
3035         }
3036
3037         /* Setup our port MTUs to match power on defaults */
3038         ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
3039         if (ret)
3040                 dev_warn(priv->dev, "failed setting MTU");
3041
3042         /* Flush the FDB table */
3043         qca8k_fdb_flush(priv);
3044
3045         /* Set the min and max ageing values supported */
3046         ds->ageing_time_min = 7000;
3047         ds->ageing_time_max = 458745000;
3048
3049         /* Set max number of LAGs supported */
3050         ds->num_lag_ids = QCA8K_NUM_LAGS;
3051
3052         return 0;
3053 }
3054
3055 static const struct dsa_switch_ops qca8k_switch_ops = {
3056         .get_tag_protocol       = qca8k_get_tag_protocol,
3057         .setup                  = qca8k_setup,
3058         .get_strings            = qca8k_get_strings,
3059         .get_ethtool_stats      = qca8k_get_ethtool_stats,
3060         .get_sset_count         = qca8k_get_sset_count,
3061         .set_ageing_time        = qca8k_set_ageing_time,
3062         .get_mac_eee            = qca8k_get_mac_eee,
3063         .set_mac_eee            = qca8k_set_mac_eee,
3064         .port_enable            = qca8k_port_enable,
3065         .port_disable           = qca8k_port_disable,
3066         .port_change_mtu        = qca8k_port_change_mtu,
3067         .port_max_mtu           = qca8k_port_max_mtu,
3068         .port_stp_state_set     = qca8k_port_stp_state_set,
3069         .port_bridge_join       = qca8k_port_bridge_join,
3070         .port_bridge_leave      = qca8k_port_bridge_leave,
3071         .port_fast_age          = qca8k_port_fast_age,
3072         .port_fdb_add           = qca8k_port_fdb_add,
3073         .port_fdb_del           = qca8k_port_fdb_del,
3074         .port_fdb_dump          = qca8k_port_fdb_dump,
3075         .port_mdb_add           = qca8k_port_mdb_add,
3076         .port_mdb_del           = qca8k_port_mdb_del,
3077         .port_mirror_add        = qca8k_port_mirror_add,
3078         .port_mirror_del        = qca8k_port_mirror_del,
3079         .port_vlan_filtering    = qca8k_port_vlan_filtering,
3080         .port_vlan_add          = qca8k_port_vlan_add,
3081         .port_vlan_del          = qca8k_port_vlan_del,
3082         .phylink_get_caps       = qca8k_phylink_get_caps,
3083         .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
3084         .phylink_mac_config     = qca8k_phylink_mac_config,
3085         .phylink_mac_link_down  = qca8k_phylink_mac_link_down,
3086         .phylink_mac_link_up    = qca8k_phylink_mac_link_up,
3087         .get_phy_flags          = qca8k_get_phy_flags,
3088         .port_lag_join          = qca8k_port_lag_join,
3089         .port_lag_leave         = qca8k_port_lag_leave,
3090         .master_state_change    = qca8k_master_change,
3091         .connect_tag_protocol   = qca8k_connect_tag_protocol,
3092 };
3093
3094 static int qca8k_read_switch_id(struct qca8k_priv *priv)
3095 {
3096         const struct qca8k_match_data *data;
3097         u32 val;
3098         u8 id;
3099         int ret;
3100
3101         /* Get the switch ID from the compatible string */
3102         data = of_device_get_match_data(priv->dev);
3103         if (!data)
3104                 return -ENODEV;
3105
3106         ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
3107         if (ret < 0)
3108                 return -ENODEV;
3109
3110         id = QCA8K_MASK_CTRL_DEVICE_ID(val);
3111         if (id != data->id) {
3112                 dev_err(priv->dev, "detected switch id %x but expected %x", id, data->id);
3113                 return -ENODEV;
3114         }
3115
3116         priv->switch_id = id;
3117
3118         /* Save revision to communicate to the internal PHY driver */
3119         priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
3120
3121         return 0;
3122 }
3123
3124 static int
3125 qca8k_sw_probe(struct mdio_device *mdiodev)
3126 {
3127         struct qca8k_priv *priv;
3128         int ret;
3129
3130         /* Allocate the private data struct so that we can probe the switch's
3131          * ID register
3132          */
3133         priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
3134         if (!priv)
3135                 return -ENOMEM;
3136
3137         priv->bus = mdiodev->bus;
3138         priv->dev = &mdiodev->dev;
3139
3140         priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
3141                                                    GPIOD_ASIS);
3142         if (IS_ERR(priv->reset_gpio))
3143                 return PTR_ERR(priv->reset_gpio);
3144
3145         if (priv->reset_gpio) {
3146                 gpiod_set_value_cansleep(priv->reset_gpio, 1);
3147                 /* The active low duration must be greater than 10 ms
3148                  * and checkpatch.pl wants 20 ms.
3149                  */
3150                 msleep(20);
3151                 gpiod_set_value_cansleep(priv->reset_gpio, 0);
3152         }
3153
3154         /* Start by setting up the register mapping */
3155         priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
3156                                         &qca8k_regmap_config);
3157         if (IS_ERR(priv->regmap)) {
3158                 dev_err(priv->dev, "regmap initialization failed");
3159                 return PTR_ERR(priv->regmap);
3160         }
3161
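        /* Invalidate the cached MDIO page/lo/hi values so the first access
         * always programs the switch page and data registers instead of
         * being skipped by the cache.
         */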
3162         priv->mdio_cache.page = 0xffff;
3163         priv->mdio_cache.lo = 0xffff;
3164         priv->mdio_cache.hi = 0xffff;
3165
3166         /* Check the detected switch id */
3167         ret = qca8k_read_switch_id(priv);
3168         if (ret)
3169                 return ret;
3170
3171         priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
3172         if (!priv->ds)
3173                 return -ENOMEM;
3174
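        /* Completion and locking for register and MIB access performed
         * in-band over the CPU port using tag_qca management frames.
         */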
3175         mutex_init(&priv->mgmt_eth_data.mutex);
3176         init_completion(&priv->mgmt_eth_data.rw_done);
3177
3178         mutex_init(&priv->mib_eth_data.mutex);
3179         init_completion(&priv->mib_eth_data.rw_done);
3180
3181         priv->ds->dev = &mdiodev->dev;
3182         priv->ds->num_ports = QCA8K_NUM_PORTS;
3183         priv->ds->priv = priv;
3184         priv->ds->ops = &qca8k_switch_ops;
3185         mutex_init(&priv->reg_mutex);
3186         dev_set_drvdata(&mdiodev->dev, priv);
3187
3188         return dsa_register_switch(priv->ds);
3189 }
3190
3191 static void
3192 qca8k_sw_remove(struct mdio_device *mdiodev)
3193 {
3194         struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3195         int i;
3196
3197         if (!priv)
3198                 return;
3199
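        /* Disable all ports before unregistering the switch */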
3200         for (i = 0; i < QCA8K_NUM_PORTS; i++)
3201                 qca8k_port_set_status(priv, i, 0);
3202
3203         dsa_unregister_switch(priv->ds);
3204
3205         dev_set_drvdata(&mdiodev->dev, NULL);
3206 }
3207
3208 static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
3209 {
3210         struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3211
3212         if (!priv)
3213                 return;
3214
3215         dsa_switch_shutdown(priv->ds);
3216
3217         dev_set_drvdata(&mdiodev->dev, NULL);
3218 }
3219
3220 #ifdef CONFIG_PM_SLEEP
3221 static void
3222 qca8k_set_pm(struct qca8k_priv *priv, int enable)
3223 {
3224         int port;
3225
3226         for (port = 0; port < QCA8K_NUM_PORTS; port++) {
3227                 /* Do not enable on resume if the port was
3228                  * disabled before.
3229                  */
3230                 if (!(priv->port_enabled_map & BIT(port)))
3231                         continue;
3232
3233                 qca8k_port_set_status(priv, port, enable);
3234         }
3235 }
3236
3237 static int qca8k_suspend(struct device *dev)
3238 {
3239         struct qca8k_priv *priv = dev_get_drvdata(dev);
3240
3241         qca8k_set_pm(priv, 0);
3242
3243         return dsa_switch_suspend(priv->ds);
3244 }
3245
3246 static int qca8k_resume(struct device *dev)
3247 {
3248         struct qca8k_priv *priv = dev_get_drvdata(dev);
3249
3250         qca8k_set_pm(priv, 1);
3251
3252         return dsa_switch_resume(priv->ds);
3253 }
3254 #endif /* CONFIG_PM_SLEEP */
3255
3256 static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
3257                          qca8k_suspend, qca8k_resume);
3258
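/* Per-compatible match data: switch ID, package variant and the number
 * of MIB counters exposed by the hardware.
 */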
3259 static const struct qca8k_match_data qca8327 = {
3260         .id = QCA8K_ID_QCA8327,
3261         .reduced_package = true,
3262         .mib_count = QCA8K_QCA832X_MIB_COUNT,
3263 };
3264
3265 static const struct qca8k_match_data qca8328 = {
3266         .id = QCA8K_ID_QCA8327,
3267         .mib_count = QCA8K_QCA832X_MIB_COUNT,
3268 };
3269
3270 static const struct qca8k_match_data qca833x = {
3271         .id = QCA8K_ID_QCA8337,
3272         .mib_count = QCA8K_QCA833X_MIB_COUNT,
3273 };
3274
3275 static const struct of_device_id qca8k_of_match[] = {
3276         { .compatible = "qca,qca8327", .data = &qca8327 },
3277         { .compatible = "qca,qca8328", .data = &qca8328 },
3278         { .compatible = "qca,qca8334", .data = &qca833x },
3279         { .compatible = "qca,qca8337", .data = &qca833x },
3280         { /* sentinel */ },
3281 };
3282
3283 static struct mdio_driver qca8kmdio_driver = {
3284         .probe  = qca8k_sw_probe,
3285         .remove = qca8k_sw_remove,
3286         .shutdown = qca8k_sw_shutdown,
3287         .mdiodrv.driver = {
3288                 .name = "qca8k",
3289                 .of_match_table = qca8k_of_match,
3290                 .pm = &qca8k_pm_ops,
3291         },
3292 };
3293
3294 mdio_module_driver(qca8kmdio_driver);
3295
3296 MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
3297 MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
3298 MODULE_LICENSE("GPL v2");
3299 MODULE_ALIAS("platform:qca8k");