linux-2.6-microblaze.git, Linux 6.9-rc1: drivers/net/ethernet/broadcom/bcmsysport.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Broadcom BCM7xxx System Port Ethernet MAC driver
4  *
5  * Copyright (C) 2014 Broadcom Corporation
6  */
7
8 #define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
9
10 #include <linux/init.h>
11 #include <linux/interrupt.h>
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/netdevice.h>
15 #include <linux/dsa/brcm.h>
16 #include <linux/etherdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/of.h>
19 #include <linux/of_net.h>
20 #include <linux/of_mdio.h>
21 #include <linux/phy.h>
22 #include <linux/phy_fixed.h>
23 #include <net/dsa.h>
24 #include <linux/clk.h>
25 #include <net/ip.h>
26 #include <net/ipv6.h>
27
28 #include "bcmsysport.h"
29
30 /* I/O register accessor helpers */
31 #define BCM_SYSPORT_IO_MACRO(name, offset) \
32 static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)  \
33 {                                                                       \
34         u32 reg = readl_relaxed(priv->base + offset + off);             \
35         return reg;                                                     \
36 }                                                                       \
37 static inline void name##_writel(struct bcm_sysport_priv *priv,         \
38                                   u32 val, u32 off)                     \
39 {                                                                       \
40         writel_relaxed(val, priv->base + offset + off);                 \
41 }                                                                       \
42
43 BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
44 BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
45 BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
46 BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
47 BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
48 BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
49 BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
50 BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
51 BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
52 BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
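
/* Illustrative expansion (editor's sketch, not part of the driver): the
 * BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET) instantiation above
 * generates roughly the following pair of accessors, so that
 * umac_readl(priv, UMAC_CMD) reads UMAC_CMD relative to the UMAC block:
 *
 *   static inline u32 umac_readl(struct bcm_sysport_priv *priv, u32 off)
 *   {
 *           return readl_relaxed(priv->base + SYS_PORT_UMAC_OFFSET + off);
 *   }
 *   static inline void umac_writel(struct bcm_sysport_priv *priv,
 *                                  u32 val, u32 off)
 *   {
 *           writel_relaxed(val, priv->base + SYS_PORT_UMAC_OFFSET + off);
 *   }
 */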
53
54 /* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
55  * same layout, except it has been moved up by 4 bytes, *sigh*
56  */
57 static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
58 {
59         if (priv->is_lite && off >= RDMA_STATUS)
60                 off += 4;
61         return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
62 }
63
64 static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
65 {
66         if (priv->is_lite && off >= RDMA_STATUS)
67                 off += 4;
68         writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
69 }
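
/* Example (illustrative only): on full SYSTEMPORT, rdma_readl(priv, off)
 * always touches SYS_PORT_RDMA_OFFSET + off; on SYSTEMPORT Lite, any offset
 * at or above RDMA_STATUS is bumped by 4, so a hypothetical register at
 * RDMA_STATUS + 0x8 is actually accessed at
 * SYS_PORT_RDMA_OFFSET + RDMA_STATUS + 0xc.
 */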
70
71 static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
72 {
73         if (!priv->is_lite) {
74                 return BIT(bit);
75         } else {
76                 if (bit >= ACB_ALGO)
77                         return BIT(bit + 1);
78                 else
79                         return BIT(bit);
80         }
81 }
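
/* Example (illustrative only): tdma_control_bit() hides the one-bit shift
 * that SYSTEMPORT Lite introduces in TDMA_CONTROL. For a control bit N
 * below ACB_ALGO both variants return BIT(N); for N at or above ACB_ALGO,
 * full SYSTEMPORT still returns BIT(N) while Lite returns BIT(N + 1).
 */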
82
83 /* L2-interrupt masking/unmasking helpers; they automatically save the applied
84  * mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
85  */
86 #define BCM_SYSPORT_INTR_L2(which)      \
87 static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
88                                                 u32 mask)               \
89 {                                                                       \
90         priv->irq##which##_mask &= ~(mask);                             \
91         intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);     \
92 }                                                                       \
93 static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
94                                                 u32 mask)               \
95 {                                                                       \
96         intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET);       \
97         priv->irq##which##_mask |= (mask);                              \
98 }                                                                       \
99
100 BCM_SYSPORT_INTR_L2(0)
101 BCM_SYSPORT_INTR_L2(1)
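
/* Illustrative expansion (editor's sketch): BCM_SYSPORT_INTR_L2(0) above
 * generates roughly
 *
 *   static inline void intrl2_0_mask_clear(struct bcm_sysport_priv *priv,
 *                                          u32 mask)
 *   {
 *           priv->irq0_mask &= ~(mask);
 *           intrl2_0_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);
 *   }
 *
 * so callers such as bcm_sysport_poll() can unmask INTRL2_0_RDMA_MBDONE
 * while the software copy in priv->irq0_mask stays in sync.
 */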
102
103 /* Register accesses to GISB/RBUS registers are expensive (a few hundred
104  * nanoseconds), so keep the check for 64-bit addresses explicit here to save
105  * one register write per packet on 32-bit platforms.
106  */
107 static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
108                                      void __iomem *d,
109                                      dma_addr_t addr)
110 {
111 #ifdef CONFIG_PHYS_ADDR_T_64BIT
112         writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
113                      d + DESC_ADDR_HI_STATUS_LEN);
114 #endif
115         writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
116 }
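
/* Example (illustrative only): with CONFIG_PHYS_ADDR_T_64BIT, a buffer
 * mapped at 0x1_2345_6000 results in two writes: the masked upper word
 * (0x1) into DESC_ADDR_HI_STATUS_LEN and 0x23456000 into DESC_ADDR_LO.
 * On 32-bit platforms only the DESC_ADDR_LO write is emitted, saving one
 * expensive GISB/RBUS access per packet.
 */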
117
118 /* Ethtool operations */
119 static void bcm_sysport_set_rx_csum(struct net_device *dev,
120                                     netdev_features_t wanted)
121 {
122         struct bcm_sysport_priv *priv = netdev_priv(dev);
123         u32 reg;
124
125         priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
126         reg = rxchk_readl(priv, RXCHK_CONTROL);
127         /* Clear L2 header checks, which would prevent BPDUs
128          * from being received.
129          */
130         reg &= ~RXCHK_L2_HDR_DIS;
131         if (priv->rx_chk_en)
132                 reg |= RXCHK_EN;
133         else
134                 reg &= ~RXCHK_EN;
135
136         /* If UniMAC forwards the CRC, we need to skip over it so that
137          * a valid CHK bit is set in the per-packet status word
138          */
139         if (priv->rx_chk_en && priv->crc_fwd)
140                 reg |= RXCHK_SKIP_FCS;
141         else
142                 reg &= ~RXCHK_SKIP_FCS;
143
144         /* If Broadcom tags are enabled (e.g.: using a switch), make
145          * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
146          * tag after the Ethernet MAC Source Address.
147          */
148         if (netdev_uses_dsa(dev))
149                 reg |= RXCHK_BRCM_TAG_EN;
150         else
151                 reg &= ~RXCHK_BRCM_TAG_EN;
152
153         rxchk_writel(priv, reg, RXCHK_CONTROL);
154 }
155
156 static void bcm_sysport_set_tx_csum(struct net_device *dev,
157                                     netdev_features_t wanted)
158 {
159         struct bcm_sysport_priv *priv = netdev_priv(dev);
160         u32 reg;
161
162         /* Hardware transmit checksum requires us to enable the Transmit status
163          * block, which is prepended to the packet contents
164          */
165         priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
166                                     NETIF_F_HW_VLAN_CTAG_TX));
167         reg = tdma_readl(priv, TDMA_CONTROL);
168         if (priv->tsb_en)
169                 reg |= tdma_control_bit(priv, TSB_EN);
170         else
171                 reg &= ~tdma_control_bit(priv, TSB_EN);
172         /* Indicating that software inserts Broadcom tags is needed for the TX
173          * checksum to be computed correctly when using VLAN HW acceleration;
174          * otherwise it has no effect, so it can always be turned on.
175          */
176         if (netdev_uses_dsa(dev))
177                 reg |= tdma_control_bit(priv, SW_BRCM_TAG);
178         else
179                 reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
180         tdma_writel(priv, reg, TDMA_CONTROL);
181
182         /* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */
183         if (wanted & NETIF_F_HW_VLAN_CTAG_TX)
184                 tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
185 }
186
187 static int bcm_sysport_set_features(struct net_device *dev,
188                                     netdev_features_t features)
189 {
190         struct bcm_sysport_priv *priv = netdev_priv(dev);
191         int ret;
192
193         ret = clk_prepare_enable(priv->clk);
194         if (ret)
195                 return ret;
196
197         /* Read CRC forward */
198         if (!priv->is_lite)
199                 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
200         else
201                 priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
202                                   GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
203
204         bcm_sysport_set_rx_csum(dev, features);
205         bcm_sysport_set_tx_csum(dev, features);
206
207         clk_disable_unprepare(priv->clk);
208
209         return 0;
210 }
211
212 /* Hardware counters must be kept in sync because the order/offset
213  * is important here (order in structure declaration = order in hardware)
214  */
215 static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
216         /* general stats */
217         STAT_NETDEV64(rx_packets),
218         STAT_NETDEV64(tx_packets),
219         STAT_NETDEV64(rx_bytes),
220         STAT_NETDEV64(tx_bytes),
221         STAT_NETDEV(rx_errors),
222         STAT_NETDEV(tx_errors),
223         STAT_NETDEV(rx_dropped),
224         STAT_NETDEV(tx_dropped),
225         STAT_NETDEV(multicast),
226         /* UniMAC RSV counters */
227         STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
228         STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
229         STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
230         STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
231         STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
232         STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
233         STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
234         STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
235         STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
236         STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
237         STAT_MIB_RX("rx_pkts", mib.rx.pkt),
238         STAT_MIB_RX("rx_bytes", mib.rx.bytes),
239         STAT_MIB_RX("rx_multicast", mib.rx.mca),
240         STAT_MIB_RX("rx_broadcast", mib.rx.bca),
241         STAT_MIB_RX("rx_fcs", mib.rx.fcs),
242         STAT_MIB_RX("rx_control", mib.rx.cf),
243         STAT_MIB_RX("rx_pause", mib.rx.pf),
244         STAT_MIB_RX("rx_unknown", mib.rx.uo),
245         STAT_MIB_RX("rx_align", mib.rx.aln),
246         STAT_MIB_RX("rx_outrange", mib.rx.flr),
247         STAT_MIB_RX("rx_code", mib.rx.cde),
248         STAT_MIB_RX("rx_carrier", mib.rx.fcr),
249         STAT_MIB_RX("rx_oversize", mib.rx.ovr),
250         STAT_MIB_RX("rx_jabber", mib.rx.jbr),
251         STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
252         STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
253         STAT_MIB_RX("rx_unicast", mib.rx.uc),
254         STAT_MIB_RX("rx_ppp", mib.rx.ppp),
255         STAT_MIB_RX("rx_crc", mib.rx.rcrc),
256         /* UniMAC TSV counters */
257         STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
258         STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
259         STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
260         STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
261         STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
262         STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
263         STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
264         STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
265         STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
266         STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
267         STAT_MIB_TX("tx_pkts", mib.tx.pkts),
268         STAT_MIB_TX("tx_multicast", mib.tx.mca),
269         STAT_MIB_TX("tx_broadcast", mib.tx.bca),
270         STAT_MIB_TX("tx_pause", mib.tx.pf),
271         STAT_MIB_TX("tx_control", mib.tx.cf),
272         STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
273         STAT_MIB_TX("tx_oversize", mib.tx.ovr),
274         STAT_MIB_TX("tx_defer", mib.tx.drf),
275         STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
276         STAT_MIB_TX("tx_single_col", mib.tx.scl),
277         STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
278         STAT_MIB_TX("tx_late_col", mib.tx.lcl),
279         STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
280         STAT_MIB_TX("tx_frags", mib.tx.frg),
281         STAT_MIB_TX("tx_total_col", mib.tx.ncl),
282         STAT_MIB_TX("tx_jabber", mib.tx.jbr),
283         STAT_MIB_TX("tx_bytes", mib.tx.bytes),
284         STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
285         STAT_MIB_TX("tx_unicast", mib.tx.uc),
286         /* UniMAC RUNT counters */
287         STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
288         STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
289         STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
290         STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
291         /* RXCHK misc statistics */
292         STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
293         STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
294                    RXCHK_OTHER_DISC_CNTR),
295         /* RBUF misc statistics */
296         STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
297         STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
298         /* RDMA misc statistics */
299         STAT_RDMA("rdma_ovflow_cnt", mib.rdma_ovflow_cnt, RDMA_OVFL_DISC_CNTR),
300         STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
301         STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
302         STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
303         STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
304         STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
305         /* Per TX-queue statistics are dynamically appended */
306 };
307
308 #define BCM_SYSPORT_STATS_LEN   ARRAY_SIZE(bcm_sysport_gstrings_stats)
309
310 static void bcm_sysport_get_drvinfo(struct net_device *dev,
311                                     struct ethtool_drvinfo *info)
312 {
313         strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
314         strscpy(info->bus_info, "platform", sizeof(info->bus_info));
315 }
316
317 static u32 bcm_sysport_get_msglvl(struct net_device *dev)
318 {
319         struct bcm_sysport_priv *priv = netdev_priv(dev);
320
321         return priv->msg_enable;
322 }
323
324 static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
325 {
326         struct bcm_sysport_priv *priv = netdev_priv(dev);
327
328         priv->msg_enable = enable;
329 }
330
331 static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
332 {
333         switch (type) {
334         case BCM_SYSPORT_STAT_NETDEV:
335         case BCM_SYSPORT_STAT_NETDEV64:
336         case BCM_SYSPORT_STAT_RXCHK:
337         case BCM_SYSPORT_STAT_RBUF:
338         case BCM_SYSPORT_STAT_RDMA:
339         case BCM_SYSPORT_STAT_SOFT:
340                 return true;
341         default:
342                 return false;
343         }
344 }
345
346 static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
347 {
348         struct bcm_sysport_priv *priv = netdev_priv(dev);
349         const struct bcm_sysport_stats *s;
350         unsigned int i, j;
351
352         switch (string_set) {
353         case ETH_SS_STATS:
354                 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
355                         s = &bcm_sysport_gstrings_stats[i];
356                         if (priv->is_lite &&
357                             !bcm_sysport_lite_stat_valid(s->type))
358                                 continue;
359                         j++;
360                 }
361                 /* Include per-queue statistics */
362                 return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
363         default:
364                 return -EOPNOTSUPP;
365         }
366 }
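
/* Worked example (illustrative): bcm_sysport_get_strings() below emits two
 * strings per queue (txqN_packets and txqN_bytes), which is what
 * NUM_SYSPORT_TXQ_STAT is assumed to cover. On a full SYSTEMPORT with
 * 4 TX queues nothing is filtered out, so ETH_SS_STATS reports
 * BCM_SYSPORT_STATS_LEN + 4 * 2 entries; on Lite the MIB_RX/MIB_TX/RUNT
 * counters are skipped first and only the remaining entries plus the same
 * 8 per-queue ones are reported.
 */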
367
368 static void bcm_sysport_get_strings(struct net_device *dev,
369                                     u32 stringset, u8 *data)
370 {
371         struct bcm_sysport_priv *priv = netdev_priv(dev);
372         const struct bcm_sysport_stats *s;
373         char buf[128];
374         int i, j;
375
376         switch (stringset) {
377         case ETH_SS_STATS:
378                 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
379                         s = &bcm_sysport_gstrings_stats[i];
380                         if (priv->is_lite &&
381                             !bcm_sysport_lite_stat_valid(s->type))
382                                 continue;
383
384                         memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
385                                ETH_GSTRING_LEN);
386                         j++;
387                 }
388
389                 for (i = 0; i < dev->num_tx_queues; i++) {
390                         snprintf(buf, sizeof(buf), "txq%d_packets", i);
391                         memcpy(data + j * ETH_GSTRING_LEN, buf,
392                                ETH_GSTRING_LEN);
393                         j++;
394
395                         snprintf(buf, sizeof(buf), "txq%d_bytes", i);
396                         memcpy(data + j * ETH_GSTRING_LEN, buf,
397                                ETH_GSTRING_LEN);
398                         j++;
399                 }
400                 break;
401         default:
402                 break;
403         }
404 }
405
406 static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
407 {
408         int i, j = 0;
409
410         for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
411                 const struct bcm_sysport_stats *s;
412                 u8 offset = 0;
413                 u32 val = 0;
414                 char *p;
415
416                 s = &bcm_sysport_gstrings_stats[i];
417                 switch (s->type) {
418                 case BCM_SYSPORT_STAT_NETDEV:
419                 case BCM_SYSPORT_STAT_NETDEV64:
420                 case BCM_SYSPORT_STAT_SOFT:
421                         continue;
422                 case BCM_SYSPORT_STAT_MIB_RX:
423                 case BCM_SYSPORT_STAT_MIB_TX:
424                 case BCM_SYSPORT_STAT_RUNT:
425                         if (priv->is_lite)
426                                 continue;
427
428                         if (s->type != BCM_SYSPORT_STAT_MIB_RX)
429                                 offset = UMAC_MIB_STAT_OFFSET;
430                         val = umac_readl(priv, UMAC_MIB_START + j + offset);
431                         break;
432                 case BCM_SYSPORT_STAT_RXCHK:
433                         val = rxchk_readl(priv, s->reg_offset);
434                         if (val == ~0)
435                                 rxchk_writel(priv, 0, s->reg_offset);
436                         break;
437                 case BCM_SYSPORT_STAT_RBUF:
438                         val = rbuf_readl(priv, s->reg_offset);
439                         if (val == ~0)
440                                 rbuf_writel(priv, 0, s->reg_offset);
441                         break;
442                 case BCM_SYSPORT_STAT_RDMA:
443                         if (!priv->is_lite)
444                                 continue;
445
446                         val = rdma_readl(priv, s->reg_offset);
447                         if (val == ~0)
448                                 rdma_writel(priv, 0, s->reg_offset);
449                         break;
450                 }
451
452                 j += s->stat_sizeof;
453                 p = (char *)priv + s->stat_offset;
454                 *(u32 *)p = val;
455         }
456
457         netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
458 }
459
460 static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
461                                         u64 *tx_bytes, u64 *tx_packets)
462 {
463         struct bcm_sysport_tx_ring *ring;
464         u64 bytes = 0, packets = 0;
465         unsigned int start;
466         unsigned int q;
467
468         for (q = 0; q < priv->netdev->num_tx_queues; q++) {
469                 ring = &priv->tx_rings[q];
470                 do {
471                         start = u64_stats_fetch_begin(&priv->syncp);
472                         bytes = ring->bytes;
473                         packets = ring->packets;
474                 } while (u64_stats_fetch_retry(&priv->syncp, start));
475
476                 *tx_bytes += bytes;
477                 *tx_packets += packets;
478         }
479 }
480
481 static void bcm_sysport_get_stats(struct net_device *dev,
482                                   struct ethtool_stats *stats, u64 *data)
483 {
484         struct bcm_sysport_priv *priv = netdev_priv(dev);
485         struct bcm_sysport_stats64 *stats64 = &priv->stats64;
486         struct u64_stats_sync *syncp = &priv->syncp;
487         struct bcm_sysport_tx_ring *ring;
488         u64 tx_bytes = 0, tx_packets = 0;
489         unsigned int start;
490         int i, j;
491
492         if (netif_running(dev)) {
493                 bcm_sysport_update_mib_counters(priv);
494                 bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
495                 stats64->tx_bytes = tx_bytes;
496                 stats64->tx_packets = tx_packets;
497         }
498
499         for (i =  0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
500                 const struct bcm_sysport_stats *s;
501                 char *p;
502
503                 s = &bcm_sysport_gstrings_stats[i];
504                 if (s->type == BCM_SYSPORT_STAT_NETDEV)
505                         p = (char *)&dev->stats;
506                 else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
507                         p = (char *)stats64;
508                 else
509                         p = (char *)priv;
510
511                 if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
512                         continue;
513                 p += s->stat_offset;
514
515                 if (s->stat_sizeof == sizeof(u64) &&
516                     s->type == BCM_SYSPORT_STAT_NETDEV64) {
517                         do {
518                                 start = u64_stats_fetch_begin(syncp);
519                                 data[i] = *(u64 *)p;
520                         } while (u64_stats_fetch_retry(syncp, start));
521                 } else
522                         data[i] = *(u32 *)p;
523                 j++;
524         }
525
526         /* For SYSTEMPORT Lite we have holes in our statistics, so j does not
527          * simply equal BCM_SYSPORT_STATS_LEN here; recompute it so that it
528          * points at the first per-TX-queue slot, i.e. the total number of
529          * reported statistics minus the number of per TX queue statistics
530          */
531         j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
532             dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
533
534         for (i = 0; i < dev->num_tx_queues; i++) {
535                 ring = &priv->tx_rings[i];
536                 data[j] = ring->packets;
537                 j++;
538                 data[j] = ring->bytes;
539                 j++;
540         }
541 }
542
543 static void bcm_sysport_get_wol(struct net_device *dev,
544                                 struct ethtool_wolinfo *wol)
545 {
546         struct bcm_sysport_priv *priv = netdev_priv(dev);
547
548         wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
549         wol->wolopts = priv->wolopts;
550
551         if (!(priv->wolopts & WAKE_MAGICSECURE))
552                 return;
553
554         memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
555 }
556
557 static int bcm_sysport_set_wol(struct net_device *dev,
558                                struct ethtool_wolinfo *wol)
559 {
560         struct bcm_sysport_priv *priv = netdev_priv(dev);
561         struct device *kdev = &priv->pdev->dev;
562         u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
563
564         if (!device_can_wakeup(kdev))
565                 return -ENOTSUPP;
566
567         if (wol->wolopts & ~supported)
568                 return -EINVAL;
569
570         if (wol->wolopts & WAKE_MAGICSECURE)
571                 memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
572
573         /* Flag the device and relevant IRQ as wakeup capable */
574         if (wol->wolopts) {
575                 device_set_wakeup_enable(kdev, 1);
576                 if (priv->wol_irq_disabled)
577                         enable_irq_wake(priv->wol_irq);
578                 priv->wol_irq_disabled = 0;
579         } else {
580                 device_set_wakeup_enable(kdev, 0);
581                 /* Avoid unbalanced disable_irq_wake calls */
582                 if (!priv->wol_irq_disabled)
583                         disable_irq_wake(priv->wol_irq);
584                 priv->wol_irq_disabled = 1;
585         }
586
587         priv->wolopts = wol->wolopts;
588
589         return 0;
590 }
591
592 static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
593                                         u32 usecs, u32 pkts)
594 {
595         u32 reg;
596
597         reg = rdma_readl(priv, RDMA_MBDONE_INTR);
598         reg &= ~(RDMA_INTR_THRESH_MASK |
599                  RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
600         reg |= pkts;
601         reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
602         rdma_writel(priv, reg, RDMA_MBDONE_INTR);
603 }
604
605 static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
606                                         struct ethtool_coalesce *ec)
607 {
608         struct bcm_sysport_priv *priv = ring->priv;
609         u32 reg;
610
611         reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
612         reg &= ~(RING_INTR_THRESH_MASK |
613                  RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
614         reg |= ec->tx_max_coalesced_frames;
615         reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
616                             RING_TIMEOUT_SHIFT;
617         tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
618 }
619
620 static int bcm_sysport_get_coalesce(struct net_device *dev,
621                                     struct ethtool_coalesce *ec,
622                                     struct kernel_ethtool_coalesce *kernel_coal,
623                                     struct netlink_ext_ack *extack)
624 {
625         struct bcm_sysport_priv *priv = netdev_priv(dev);
626         u32 reg;
627
628         reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
629
630         ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
631         ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
632
633         reg = rdma_readl(priv, RDMA_MBDONE_INTR);
634
635         ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
636         ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
637         ec->use_adaptive_rx_coalesce = priv->dim.use_dim;
638
639         return 0;
640 }
641
642 static int bcm_sysport_set_coalesce(struct net_device *dev,
643                                     struct ethtool_coalesce *ec,
644                                     struct kernel_ethtool_coalesce *kernel_coal,
645                                     struct netlink_ext_ack *extack)
646 {
647         struct bcm_sysport_priv *priv = netdev_priv(dev);
648         struct dim_cq_moder moder;
649         u32 usecs, pkts;
650         unsigned int i;
651
652         /* Base system clock is 125 MHz; the DMA timeout is this reference clock
653          * divided by 1024, which yields roughly 8.192 us. Our maximum value has
654          * to fit in the RING_TIMEOUT_MASK (16 bits).
655          */
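        /* Worked example (illustrative): one timeout tick is 1024 / 125 MHz
         * = 8.192 us and the helpers called below program
         * DIV_ROUND_UP(usecs * 1000, 8192) ticks, so asking for
         * tx_coalesce_usecs = 100 programs DIV_ROUND_UP(100000, 8192) = 13
         * ticks, i.e. roughly 106.5 us of coalescing delay.
         */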
656         if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
657             ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
658             ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
659             ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
660                 return -EINVAL;
661
662         if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
663             (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
664                 return -EINVAL;
665
666         for (i = 0; i < dev->num_tx_queues; i++)
667                 bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);
668
669         priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
670         priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
671         usecs = priv->rx_coalesce_usecs;
672         pkts = priv->rx_max_coalesced_frames;
673
674         if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
675                 moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
676                 usecs = moder.usec;
677                 pkts = moder.pkts;
678         }
679
680         priv->dim.use_dim = ec->use_adaptive_rx_coalesce;
681
682         /* Apply desired coalescing parameters */
683         bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
684
685         return 0;
686 }
687
688 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
689 {
690         dev_consume_skb_any(cb->skb);
691         cb->skb = NULL;
692         dma_unmap_addr_set(cb, dma_addr, 0);
693 }
694
695 static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
696                                              struct bcm_sysport_cb *cb)
697 {
698         struct device *kdev = &priv->pdev->dev;
699         struct net_device *ndev = priv->netdev;
700         struct sk_buff *skb, *rx_skb;
701         dma_addr_t mapping;
702
703         /* Allocate a new SKB for a new packet */
704         skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
705                                  GFP_ATOMIC | __GFP_NOWARN);
706         if (!skb) {
707                 priv->mib.alloc_rx_buff_failed++;
708                 netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
709                 return NULL;
710         }
711
712         mapping = dma_map_single(kdev, skb->data,
713                                  RX_BUF_LENGTH, DMA_FROM_DEVICE);
714         if (dma_mapping_error(kdev, mapping)) {
715                 priv->mib.rx_dma_failed++;
716                 dev_kfree_skb_any(skb);
717                 netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
718                 return NULL;
719         }
720
721         /* Grab the current SKB on the ring */
722         rx_skb = cb->skb;
723         if (likely(rx_skb))
724                 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
725                                  RX_BUF_LENGTH, DMA_FROM_DEVICE);
726
727         /* Put the new SKB on the ring */
728         cb->skb = skb;
729         dma_unmap_addr_set(cb, dma_addr, mapping);
730         dma_desc_set_addr(priv, cb->bd_addr, mapping);
731
732         netif_dbg(priv, rx_status, ndev, "RX refill\n");
733
734         /* Return the current SKB to the caller */
735         return rx_skb;
736 }
737
738 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
739 {
740         struct bcm_sysport_cb *cb;
741         struct sk_buff *skb;
742         unsigned int i;
743
744         for (i = 0; i < priv->num_rx_bds; i++) {
745                 cb = &priv->rx_cbs[i];
746                 skb = bcm_sysport_rx_refill(priv, cb);
747                 dev_kfree_skb(skb);
748                 if (!cb->skb)
749                         return -ENOMEM;
750         }
751
752         return 0;
753 }
754
755 /* Poll the hardware for up to budget packets to process */
756 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
757                                         unsigned int budget)
758 {
759         struct bcm_sysport_stats64 *stats64 = &priv->stats64;
760         struct net_device *ndev = priv->netdev;
761         unsigned int processed = 0, to_process;
762         unsigned int processed_bytes = 0;
763         struct bcm_sysport_cb *cb;
764         struct sk_buff *skb;
765         unsigned int p_index;
766         u16 len, status;
767         struct bcm_rsb *rsb;
768
769         /* Clear status before servicing to reduce spurious interrupts */
770         intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
771
772         /* Determine how much we should process since the last call; SYSTEMPORT
773          * Lite groups the producer and consumer indexes into the same 32-bit
774          * register, which we access using RDMA_CONS_INDEX
775          */
776         if (!priv->is_lite)
777                 p_index = rdma_readl(priv, RDMA_PROD_INDEX);
778         else
779                 p_index = rdma_readl(priv, RDMA_CONS_INDEX);
780         p_index &= RDMA_PROD_INDEX_MASK;
781
782         to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
783
784         netif_dbg(priv, rx_status, ndev,
785                   "p_index=%d rx_c_index=%d to_process=%d\n",
786                   p_index, priv->rx_c_index, to_process);
787
788         while ((processed < to_process) && (processed < budget)) {
789                 cb = &priv->rx_cbs[priv->rx_read_ptr];
790                 skb = bcm_sysport_rx_refill(priv, cb);
791
792
793                 /* We do not have a backing SKB, so we do not have a
794                  * corresponding DMA mapping for this incoming packet since
795                  * bcm_sysport_rx_refill always either has both skb and mapping
796                  * or none.
797                  */
798                 if (unlikely(!skb)) {
799                         netif_err(priv, rx_err, ndev, "out of memory!\n");
800                         ndev->stats.rx_dropped++;
801                         ndev->stats.rx_errors++;
802                         goto next;
803                 }
804
805                 /* Extract the Receive Status Block prepended */
806                 rsb = (struct bcm_rsb *)skb->data;
807                 len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
808                 status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
809                           DESC_STATUS_MASK;
810
811                 netif_dbg(priv, rx_status, ndev,
812                           "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
813                           p_index, priv->rx_c_index, priv->rx_read_ptr,
814                           len, status);
815
816                 if (unlikely(len > RX_BUF_LENGTH)) {
817                         netif_err(priv, rx_status, ndev, "oversized packet\n");
818                         ndev->stats.rx_length_errors++;
819                         ndev->stats.rx_errors++;
820                         dev_kfree_skb_any(skb);
821                         goto next;
822                 }
823
824                 if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
825                         netif_err(priv, rx_status, ndev, "fragmented packet!\n");
826                         ndev->stats.rx_dropped++;
827                         ndev->stats.rx_errors++;
828                         dev_kfree_skb_any(skb);
829                         goto next;
830                 }
831
832                 if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
833                         netif_err(priv, rx_err, ndev, "error packet\n");
834                         if (status & RX_STATUS_OVFLOW)
835                                 ndev->stats.rx_over_errors++;
836                         ndev->stats.rx_dropped++;
837                         ndev->stats.rx_errors++;
838                         dev_kfree_skb_any(skb);
839                         goto next;
840                 }
841
842                 skb_put(skb, len);
843
844                 /* Hardware validated our checksum */
845                 if (likely(status & DESC_L4_CSUM))
846                         skb->ip_summed = CHECKSUM_UNNECESSARY;
847
848                 /* Hardware prepends packets with 2 bytes before the Ethernet
849                  * header, plus we have the Receive Status Block; strip all
850                  * of this from the SKB.
851                  */
852                 skb_pull(skb, sizeof(*rsb) + 2);
853                 len -= (sizeof(*rsb) + 2);
854                 processed_bytes += len;
855
856                 /* UniMAC may forward CRC */
857                 if (priv->crc_fwd) {
858                         skb_trim(skb, len - ETH_FCS_LEN);
859                         len -= ETH_FCS_LEN;
860                 }
861
862                 skb->protocol = eth_type_trans(skb, ndev);
863                 ndev->stats.rx_packets++;
864                 ndev->stats.rx_bytes += len;
865                 u64_stats_update_begin(&priv->syncp);
866                 stats64->rx_packets++;
867                 stats64->rx_bytes += len;
868                 u64_stats_update_end(&priv->syncp);
869
870                 napi_gro_receive(&priv->napi, skb);
871 next:
872                 processed++;
873                 priv->rx_read_ptr++;
874
875                 if (priv->rx_read_ptr == priv->num_rx_bds)
876                         priv->rx_read_ptr = 0;
877         }
878
879         priv->dim.packets = processed;
880         priv->dim.bytes = processed_bytes;
881
882         return processed;
883 }
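
/* Worked example (illustrative) of the length handling above: the length
 * reported in the RSB covers the status block, the 2-byte pad and the
 * Ethernet frame. If the RSB reports len = 128, the skb handed to
 * napi_gro_receive() carries 128 - sizeof(struct bcm_rsb) - 2 bytes of
 * frame data, minus ETH_FCS_LEN more when UniMAC forwards the CRC.
 */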
884
885 static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
886                                        struct bcm_sysport_cb *cb,
887                                        unsigned int *bytes_compl,
888                                        unsigned int *pkts_compl)
889 {
890         struct bcm_sysport_priv *priv = ring->priv;
891         struct device *kdev = &priv->pdev->dev;
892
893         if (cb->skb) {
894                 *bytes_compl += cb->skb->len;
895                 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
896                                  dma_unmap_len(cb, dma_len),
897                                  DMA_TO_DEVICE);
898                 (*pkts_compl)++;
899                 bcm_sysport_free_cb(cb);
900         /* SKB fragment */
901         } else if (dma_unmap_addr(cb, dma_addr)) {
902                 *bytes_compl += dma_unmap_len(cb, dma_len);
903                 dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
904                                dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
905                 dma_unmap_addr_set(cb, dma_addr, 0);
906         }
907 }
908
909 /* Reclaim queued SKBs for transmission completion, lockless version */
910 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
911                                              struct bcm_sysport_tx_ring *ring)
912 {
913         unsigned int pkts_compl = 0, bytes_compl = 0;
914         struct net_device *ndev = priv->netdev;
915         unsigned int txbds_processed = 0;
916         struct bcm_sysport_cb *cb;
917         unsigned int txbds_ready;
918         unsigned int c_index;
919         u32 hw_ind;
920
921         /* Clear status before servicing to reduce spurious interrupts */
922         if (!ring->priv->is_lite)
923                 intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
924         else
925                 intrl2_0_writel(ring->priv, BIT(ring->index +
926                                 INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);
927
928         /* Compute how many descriptors have been processed since last call */
929         hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
930         c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
931         txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
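
        /* Illustrative note: the masked subtraction above is robust against
         * wrap-around. Assuming the usual 16-bit mask (RING_CONS_INDEX_MASK
         * = 0xffff), a hardware index of 0x0005 and a stored ring->c_index
         * of 0xfffe yield (0x0005 - 0xfffe) & 0xffff = 7 descriptors ready
         * to reclaim.
         */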
932
933         netif_dbg(priv, tx_done, ndev,
934                   "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
935                   ring->index, ring->c_index, c_index, txbds_ready);
936
937         while (txbds_processed < txbds_ready) {
938                 cb = &ring->cbs[ring->clean_index];
939                 bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
940
941                 ring->desc_count++;
942                 txbds_processed++;
943
944                 if (likely(ring->clean_index < ring->size - 1))
945                         ring->clean_index++;
946                 else
947                         ring->clean_index = 0;
948         }
949
950         u64_stats_update_begin(&priv->syncp);
951         ring->packets += pkts_compl;
952         ring->bytes += bytes_compl;
953         u64_stats_update_end(&priv->syncp);
954
955         ring->c_index = c_index;
956
957         netif_dbg(priv, tx_done, ndev,
958                   "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
959                   ring->index, ring->c_index, pkts_compl, bytes_compl);
960
961         return pkts_compl;
962 }
963
964 /* Locked version of the per-ring TX reclaim routine */
965 static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
966                                            struct bcm_sysport_tx_ring *ring)
967 {
968         struct netdev_queue *txq;
969         unsigned int released;
970         unsigned long flags;
971
972         txq = netdev_get_tx_queue(priv->netdev, ring->index);
973
974         spin_lock_irqsave(&ring->lock, flags);
975         released = __bcm_sysport_tx_reclaim(priv, ring);
976         if (released)
977                 netif_tx_wake_queue(txq);
978
979         spin_unlock_irqrestore(&ring->lock, flags);
980
981         return released;
982 }
983
984 /* Locked version of the per-ring TX reclaim, but does not wake the queue */
985 static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
986                                  struct bcm_sysport_tx_ring *ring)
987 {
988         unsigned long flags;
989
990         spin_lock_irqsave(&ring->lock, flags);
991         __bcm_sysport_tx_reclaim(priv, ring);
992         spin_unlock_irqrestore(&ring->lock, flags);
993 }
994
995 static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
996 {
997         struct bcm_sysport_tx_ring *ring =
998                 container_of(napi, struct bcm_sysport_tx_ring, napi);
999         unsigned int work_done = 0;
1000
1001         work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
1002
1003         if (work_done == 0) {
1004                 napi_complete(napi);
1005                 /* re-enable TX interrupt */
1006                 if (!ring->priv->is_lite)
1007                         intrl2_1_mask_clear(ring->priv, BIT(ring->index));
1008                 else
1009                         intrl2_0_mask_clear(ring->priv, BIT(ring->index +
1010                                             INTRL2_0_TDMA_MBDONE_SHIFT));
1011
1012                 return 0;
1013         }
1014
1015         return budget;
1016 }
1017
1018 static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
1019 {
1020         unsigned int q;
1021
1022         for (q = 0; q < priv->netdev->num_tx_queues; q++)
1023                 bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
1024 }
1025
1026 static int bcm_sysport_poll(struct napi_struct *napi, int budget)
1027 {
1028         struct bcm_sysport_priv *priv =
1029                 container_of(napi, struct bcm_sysport_priv, napi);
1030         struct dim_sample dim_sample = {};
1031         unsigned int work_done = 0;
1032
1033         work_done = bcm_sysport_desc_rx(priv, budget);
1034
1035         priv->rx_c_index += work_done;
1036         priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
1037
1038         /* SYSTEMPORT Lite groups the producer/consumer index, producer is
1039          * maintained by HW, but writes to it will be ignored while RDMA
1040          * is active
1041          */
1042         if (!priv->is_lite)
1043                 rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
1044         else
1045                 rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
1046
1047         if (work_done < budget) {
1048                 napi_complete_done(napi, work_done);
1049                 /* re-enable RX interrupts */
1050                 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
1051         }
1052
1053         if (priv->dim.use_dim) {
1054                 dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
1055                                   priv->dim.bytes, &dim_sample);
1056                 net_dim(&priv->dim.dim, dim_sample);
1057         }
1058
1059         return work_done;
1060 }
1061
1062 static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
1063 {
1064         u32 reg, bit;
1065
1066         reg = umac_readl(priv, UMAC_MPD_CTRL);
1067         if (enable)
1068                 reg |= MPD_EN;
1069         else
1070                 reg &= ~MPD_EN;
1071         umac_writel(priv, reg, UMAC_MPD_CTRL);
1072
1073         if (priv->is_lite)
1074                 bit = RBUF_ACPI_EN_LITE;
1075         else
1076                 bit = RBUF_ACPI_EN;
1077
1078         reg = rbuf_readl(priv, RBUF_CONTROL);
1079         if (enable)
1080                 reg |= bit;
1081         else
1082                 reg &= ~bit;
1083         rbuf_writel(priv, reg, RBUF_CONTROL);
1084 }
1085
1086 static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
1087 {
1088         unsigned int index;
1089         u32 reg;
1090
1091         /* Disable RXCHK, active filters and Broadcom tag matching */
1092         reg = rxchk_readl(priv, RXCHK_CONTROL);
1093         reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
1094                  RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
1095         rxchk_writel(priv, reg, RXCHK_CONTROL);
1096
1097         /* Make sure we restore correct CID index in case HW lost
1098          * its context during deep idle state
1099          */
1100         for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
1101                 rxchk_writel(priv, priv->filters_loc[index] <<
1102                              RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
1103                 rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
1104         }
1105
1106         /* Clear the MagicPacket detection logic */
1107         mpd_enable_set(priv, false);
1108
1109         reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
1110         if (reg & INTRL2_0_MPD)
1111                 netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
1112
1113         if (reg & INTRL2_0_BRCM_MATCH_TAG) {
1114                 reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
1115                                   RXCHK_BRCM_TAG_MATCH_MASK;
1116                 netdev_info(priv->netdev,
1117                             "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
1118         }
1119
1120         netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
1121 }
1122
1123 static void bcm_sysport_dim_work(struct work_struct *work)
1124 {
1125         struct dim *dim = container_of(work, struct dim, work);
1126         struct bcm_sysport_net_dim *ndim =
1127                         container_of(dim, struct bcm_sysport_net_dim, dim);
1128         struct bcm_sysport_priv *priv =
1129                         container_of(ndim, struct bcm_sysport_priv, dim);
1130         struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
1131                                                                     dim->profile_ix);
1132
1133         bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
1134         dim->state = DIM_START_MEASURE;
1135 }
1136
1137 /* RX and misc interrupt routine */
1138 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
1139 {
1140         struct net_device *dev = dev_id;
1141         struct bcm_sysport_priv *priv = netdev_priv(dev);
1142         struct bcm_sysport_tx_ring *txr;
1143         unsigned int ring, ring_bit;
1144
1145         priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
1146                           ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
1147         intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
1148
1149         if (unlikely(priv->irq0_stat == 0)) {
1150                 netdev_warn(priv->netdev, "spurious RX interrupt\n");
1151                 return IRQ_NONE;
1152         }
1153
1154         if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
1155                 priv->dim.event_ctr++;
1156                 if (likely(napi_schedule_prep(&priv->napi))) {
1157                         /* disable RX interrupts */
1158                         intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
1159                         __napi_schedule_irqoff(&priv->napi);
1160                 }
1161         }
1162
1163         /* TX ring is full, perform a full reclaim since we do not know
1164          * which one would trigger this interrupt
1165          */
1166         if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
1167                 bcm_sysport_tx_reclaim_all(priv);
1168
1169         if (!priv->is_lite)
1170                 goto out;
1171
1172         for (ring = 0; ring < dev->num_tx_queues; ring++) {
1173                 ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
1174                 if (!(priv->irq0_stat & ring_bit))
1175                         continue;
1176
1177                 txr = &priv->tx_rings[ring];
1178
1179                 if (likely(napi_schedule_prep(&txr->napi))) {
1180                         intrl2_0_mask_set(priv, ring_bit);
1181                         __napi_schedule(&txr->napi);
1182                 }
1183         }
1184 out:
1185         return IRQ_HANDLED;
1186 }
1187
1188 /* TX interrupt service routine */
1189 static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
1190 {
1191         struct net_device *dev = dev_id;
1192         struct bcm_sysport_priv *priv = netdev_priv(dev);
1193         struct bcm_sysport_tx_ring *txr;
1194         unsigned int ring;
1195
1196         priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
1197                                 ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
1198         intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1199
1200         if (unlikely(priv->irq1_stat == 0)) {
1201                 netdev_warn(priv->netdev, "spurious TX interrupt\n");
1202                 return IRQ_NONE;
1203         }
1204
1205         for (ring = 0; ring < dev->num_tx_queues; ring++) {
1206                 if (!(priv->irq1_stat & BIT(ring)))
1207                         continue;
1208
1209                 txr = &priv->tx_rings[ring];
1210
1211                 if (likely(napi_schedule_prep(&txr->napi))) {
1212                         intrl2_1_mask_set(priv, BIT(ring));
1213                         __napi_schedule_irqoff(&txr->napi);
1214                 }
1215         }
1216
1217         return IRQ_HANDLED;
1218 }
1219
1220 static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
1221 {
1222         struct bcm_sysport_priv *priv = dev_id;
1223
1224         pm_wakeup_event(&priv->pdev->dev, 0);
1225
1226         return IRQ_HANDLED;
1227 }
1228
1229 #ifdef CONFIG_NET_POLL_CONTROLLER
1230 static void bcm_sysport_poll_controller(struct net_device *dev)
1231 {
1232         struct bcm_sysport_priv *priv = netdev_priv(dev);
1233
1234         disable_irq(priv->irq0);
1235         bcm_sysport_rx_isr(priv->irq0, priv);
1236         enable_irq(priv->irq0);
1237
1238         if (!priv->is_lite) {
1239                 disable_irq(priv->irq1);
1240                 bcm_sysport_tx_isr(priv->irq1, priv);
1241                 enable_irq(priv->irq1);
1242         }
1243 }
1244 #endif
1245
1246 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
1247                                               struct net_device *dev)
1248 {
1249         struct bcm_sysport_priv *priv = netdev_priv(dev);
1250         struct sk_buff *nskb;
1251         struct bcm_tsb *tsb;
1252         u32 csum_info;
1253         u8 ip_proto;
1254         u16 csum_start;
1255         __be16 ip_ver;
1256
1257         /* Re-allocate SKB if needed */
1258         if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
1259                 nskb = skb_realloc_headroom(skb, sizeof(*tsb));
1260                 if (!nskb) {
1261                         dev_kfree_skb_any(skb);
1262                         priv->mib.tx_realloc_tsb_failed++;
1263                         dev->stats.tx_errors++;
1264                         dev->stats.tx_dropped++;
1265                         return NULL;
1266                 }
1267                 dev_consume_skb_any(skb);
1268                 skb = nskb;
1269                 priv->mib.tx_realloc_tsb++;
1270         }
1271
1272         tsb = skb_push(skb, sizeof(*tsb));
1273         /* Zero-out TSB by default */
1274         memset(tsb, 0, sizeof(*tsb));
1275
1276         if (skb_vlan_tag_present(skb)) {
1277                 tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
1278                 tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
1279         }
1280
1281         if (skb->ip_summed == CHECKSUM_PARTIAL) {
1282                 ip_ver = skb->protocol;
1283                 switch (ip_ver) {
1284                 case htons(ETH_P_IP):
1285                         ip_proto = ip_hdr(skb)->protocol;
1286                         break;
1287                 case htons(ETH_P_IPV6):
1288                         ip_proto = ipv6_hdr(skb)->nexthdr;
1289                         break;
1290                 default:
1291                         return skb;
1292                 }
1293
1294                 /* Get the checksum offset and the L4 (transport) offset */
1295                 csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
1296                 /* Account for the HW inserted VLAN tag */
1297                 if (skb_vlan_tag_present(skb))
1298                         csum_start += VLAN_HLEN;
1299                 csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
1300                 csum_info |= (csum_start << L4_PTR_SHIFT);
1301
1302                 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
1303                         csum_info |= L4_LENGTH_VALID;
1304                         if (ip_proto == IPPROTO_UDP &&
1305                             ip_ver == htons(ETH_P_IP))
1306                                 csum_info |= L4_UDP;
1307                 } else {
1308                         csum_info = 0;
1309                 }
1310
1311                 tsb->l4_ptr_dest_map = csum_info;
1312         }
1313
1314         return skb;
1315 }
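
/* Worked example (illustrative, assuming a plain TCP/IPv4 frame with no
 * VLAN tag): the L4 header starts 34 bytes into the frame (14-byte Ethernet
 * header + 20-byte IP header) and the TCP checksum field sits at offset 16
 * within it, so csum_start = 34 and csum_info encodes the checksum
 * destination 34 + 16 = 50 in the low bits, csum_start shifted by
 * L4_PTR_SHIFT in the upper bits, plus L4_LENGTH_VALID since the protocol
 * is TCP.
 */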
1316
1317 static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
1318                                     struct net_device *dev)
1319 {
1320         struct bcm_sysport_priv *priv = netdev_priv(dev);
1321         struct device *kdev = &priv->pdev->dev;
1322         struct bcm_sysport_tx_ring *ring;
1323         unsigned long flags, desc_flags;
1324         struct bcm_sysport_cb *cb;
1325         struct netdev_queue *txq;
1326         u32 len_status, addr_lo;
1327         unsigned int skb_len;
1328         dma_addr_t mapping;
1329         u16 queue;
1330         int ret;
1331
1332         queue = skb_get_queue_mapping(skb);
1333         txq = netdev_get_tx_queue(dev, queue);
1334         ring = &priv->tx_rings[queue];
1335
1336         /* lock against tx reclaim in BH context and TX ring full interrupt */
1337         spin_lock_irqsave(&ring->lock, flags);
1338         if (unlikely(ring->desc_count == 0)) {
1339                 netif_tx_stop_queue(txq);
1340                 netdev_err(dev, "queue %d awake and ring full!\n", queue);
1341                 ret = NETDEV_TX_BUSY;
1342                 goto out;
1343         }
1344
1345         /* Insert TSB and checksum infos */
1346         if (priv->tsb_en) {
1347                 skb = bcm_sysport_insert_tsb(skb, dev);
1348                 if (!skb) {
1349                         ret = NETDEV_TX_OK;
1350                         goto out;
1351                 }
1352         }
1353
1354         skb_len = skb->len;
1355
1356         mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1357         if (dma_mapping_error(kdev, mapping)) {
1358                 priv->mib.tx_dma_failed++;
1359                 netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
1360                           skb->data, skb_len);
1361                 ret = NETDEV_TX_OK;
1362                 goto out;
1363         }
1364
1365         /* Remember the SKB for future freeing */
1366         cb = &ring->cbs[ring->curr_desc];
1367         cb->skb = skb;
1368         dma_unmap_addr_set(cb, dma_addr, mapping);
1369         dma_unmap_len_set(cb, dma_len, skb_len);
1370
1371         addr_lo = lower_32_bits(mapping);
1372         len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
1373         len_status |= (skb_len << DESC_LEN_SHIFT);
1374         len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
1375                        DESC_STATUS_SHIFT;
1376         if (skb->ip_summed == CHECKSUM_PARTIAL)
1377                 len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
1378         if (skb_vlan_tag_present(skb))
1379                 len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT);
1380
1381         ring->curr_desc++;
1382         if (ring->curr_desc == ring->size)
1383                 ring->curr_desc = 0;
1384         ring->desc_count--;
1385
1386         /* Ports are latched, so write upper address first */
1387         spin_lock_irqsave(&priv->desc_lock, desc_flags);
1388         tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
1389         tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
1390         spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
1391
1392         /* Check ring space and update SW control flow */
1393         if (ring->desc_count == 0)
1394                 netif_tx_stop_queue(txq);
1395
1396         netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
1397                   ring->index, ring->desc_count, ring->curr_desc);
1398
1399         ret = NETDEV_TX_OK;
1400 out:
1401         spin_unlock_irqrestore(&ring->lock, flags);
1402         return ret;
1403 }
1404
1405 static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
1406 {
1407         netdev_warn(dev, "transmit timeout!\n");
1408
1409         netif_trans_update(dev);
1410         dev->stats.tx_errors++;
1411
1412         netif_tx_wake_all_queues(dev);
1413 }
1414
1415 /* phylib adjust link callback */
1416 static void bcm_sysport_adj_link(struct net_device *dev)
1417 {
1418         struct bcm_sysport_priv *priv = netdev_priv(dev);
1419         struct phy_device *phydev = dev->phydev;
1420         unsigned int changed = 0;
1421         u32 cmd_bits = 0, reg;
1422
1423         if (priv->old_link != phydev->link) {
1424                 changed = 1;
1425                 priv->old_link = phydev->link;
1426         }
1427
1428         if (priv->old_duplex != phydev->duplex) {
1429                 changed = 1;
1430                 priv->old_duplex = phydev->duplex;
1431         }
1432
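             /* SYSTEMPORT Lite uses the GIB rather than the UniMAC, so there
              * is no UMAC_CMD register to reprogram here; only report the
              * link change.
              */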
1433         if (priv->is_lite)
1434                 goto out;
1435
1436         switch (phydev->speed) {
1437         case SPEED_2500:
1438                 cmd_bits = CMD_SPEED_2500;
1439                 break;
1440         case SPEED_1000:
1441                 cmd_bits = CMD_SPEED_1000;
1442                 break;
1443         case SPEED_100:
1444                 cmd_bits = CMD_SPEED_100;
1445                 break;
1446         case SPEED_10:
1447                 cmd_bits = CMD_SPEED_10;
1448                 break;
1449         default:
1450                 break;
1451         }
1452         cmd_bits <<= CMD_SPEED_SHIFT;
1453
1454         if (phydev->duplex == DUPLEX_HALF)
1455                 cmd_bits |= CMD_HD_EN;
1456
1457         if (priv->old_pause != phydev->pause) {
1458                 changed = 1;
1459                 priv->old_pause = phydev->pause;
1460         }
1461
1462         if (!phydev->pause)
1463                 cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
1464
1465         if (!changed)
1466                 return;
1467
1468         if (phydev->link) {
1469                 reg = umac_readl(priv, UMAC_CMD);
1470                 reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
1471                         CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
1472                         CMD_TX_PAUSE_IGNORE);
1473                 reg |= cmd_bits;
1474                 umac_writel(priv, reg, UMAC_CMD);
1475         }
1476 out:
1477         if (changed)
1478                 phy_print_status(phydev);
1479 }
1480
1481 static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
1482                                  void (*cb)(struct work_struct *work))
1483 {
1484         struct bcm_sysport_net_dim *dim = &priv->dim;
1485
1486         INIT_WORK(&dim->dim.work, cb);
1487         dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1488         dim->event_ctr = 0;
1489         dim->packets = 0;
1490         dim->bytes = 0;
1491 }
1492
1493 static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
1494 {
1495         struct bcm_sysport_net_dim *dim = &priv->dim;
1496         struct dim_cq_moder moder;
1497         u32 usecs, pkts;
1498
1499         usecs = priv->rx_coalesce_usecs;
1500         pkts = priv->rx_max_coalesced_frames;
1501
1502         /* If DIM was enabled, re-apply default parameters */
1503         if (dim->use_dim) {
1504                 moder = net_dim_get_def_rx_moderation(dim->dim.mode);
1505                 usecs = moder.usec;
1506                 pkts = moder.pkts;
1507         }
1508
1509         bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
1510 }
1511
1512 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1513                                     unsigned int index)
1514 {
1515         struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1516         size_t size;
1517         u32 reg;
1518
1519         /* Simple descriptors partitioning for now */
1520         size = 256;
1521
1522         ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
1523         if (!ring->cbs) {
1524                 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1525                 return -ENOMEM;
1526         }
1527
1528         /* Initialize SW view of the ring */
1529         spin_lock_init(&ring->lock);
1530         ring->priv = priv;
1531         netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll);
1532         ring->index = index;
1533         ring->size = size;
1534         ring->clean_index = 0;
1535         ring->alloc_size = ring->size;
1536         ring->desc_count = ring->size;
1537         ring->curr_desc = 0;
1538
1539         /* Initialize HW ring */
1540         tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
1541         tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
1542         tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
1543         tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
1544
1545         /* Configure QID and port mapping */
1546         reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
1547         reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
1548         if (ring->inspect) {
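                     /* Remember which switch port/queue this ring feeds so
                      * that queue's status can be inspected by the hardware.
                      */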
1549                 reg |= ring->switch_queue & RING_QID_MASK;
1550                 reg |= ring->switch_port << RING_PORT_ID_SHIFT;
1551         } else {
1552                 reg |= RING_IGNORE_STATUS;
1553         }
1554         tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
1555         reg = 0;
1556         /* Adjust the packet size calculations if SYSTEMPORT is responsible
1557          * for HW insertion of VLAN tags
1558          */
1559         if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
1560                 reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT;
1561         tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));
1562
1563         /* Enable ACB algorithm 2 */
1564         reg = tdma_readl(priv, TDMA_CONTROL);
1565         reg |= tdma_control_bit(priv, ACB_ALGO);
1566         tdma_writel(priv, reg, TDMA_CONTROL);
1567
1568         /* Do not use tdma_control_bit() here because TSB_SWAP1 collides
1569          * with the original definition of ACB_ALGO
1570          */
1571         reg = tdma_readl(priv, TDMA_CONTROL);
1572         if (priv->is_lite)
1573                 reg &= ~BIT(TSB_SWAP1);
1574         /* Set a correct TSB format based on host endian */
1575         if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1576                 reg |= tdma_control_bit(priv, TSB_SWAP0);
1577         else
1578                 reg &= ~tdma_control_bit(priv, TSB_SWAP0);
1579         tdma_writel(priv, reg, TDMA_CONTROL);
1580
1581         /* Program the number of descriptors as MAX_THRESHOLD and half of
1582          * its size for the hysteresis trigger
1583          */
1584         tdma_writel(priv, ring->size |
1585                         1 << RING_HYST_THRESH_SHIFT,
1586                         TDMA_DESC_RING_MAX_HYST(index));
1587
1588         /* Enable the ring queue in the arbiter */
1589         reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
1590         reg |= (1 << index);
1591         tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
1592
1593         napi_enable(&ring->napi);
1594
1595         netif_dbg(priv, hw, priv->netdev,
1596                   "TDMA cfg, size=%d, switch q=%d,port=%d\n",
1597                   ring->size, ring->switch_queue,
1598                   ring->switch_port);
1599
1600         return 0;
1601 }
1602
1603 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1604                                      unsigned int index)
1605 {
1606         struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
1607         u32 reg;
1608
1609         /* Caller should stop the TDMA engine */
1610         reg = tdma_readl(priv, TDMA_STATUS);
1611         if (!(reg & TDMA_DISABLED))
1612                 netdev_warn(priv->netdev, "TDMA not stopped!\n");
1613
1614         /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
1615          * fail, so by checking this pointer we know whether the TX ring was
1616          * fully initialized or not.
1617          */
1618         if (!ring->cbs)
1619                 return;
1620
1621         napi_disable(&ring->napi);
1622         netif_napi_del(&ring->napi);
1623
1624         bcm_sysport_tx_clean(priv, ring);
1625
1626         kfree(ring->cbs);
1627         ring->cbs = NULL;
1628         ring->size = 0;
1629         ring->alloc_size = 0;
1630
1631         netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
1632 }
1633
1634 /* RDMA helper */
1635 static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
1636                                   unsigned int enable)
1637 {
1638         unsigned int timeout = 1000;
1639         u32 reg;
1640
1641         reg = rdma_readl(priv, RDMA_CONTROL);
1642         if (enable)
1643                 reg |= RDMA_EN;
1644         else
1645                 reg &= ~RDMA_EN;
1646         rdma_writel(priv, reg, RDMA_CONTROL);
1647
1648         /* Poll for RDMA enable/disable completion */
1649         do {
1650                 reg = rdma_readl(priv, RDMA_STATUS);
1651                 if (!!(reg & RDMA_DISABLED) == !enable)
1652                         return 0;
1653                 usleep_range(1000, 2000);
1654         } while (timeout-- > 0);
1655
1656         netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1657
1658         return -ETIMEDOUT;
1659 }
1660
1661 /* TDMA helper */
1662 static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
1663                                   unsigned int enable)
1664 {
1665         unsigned int timeout = 1000;
1666         u32 reg;
1667
1668         reg = tdma_readl(priv, TDMA_CONTROL);
1669         if (enable)
1670                 reg |= tdma_control_bit(priv, TDMA_EN);
1671         else
1672                 reg &= ~tdma_control_bit(priv, TDMA_EN);
1673         tdma_writel(priv, reg, TDMA_CONTROL);
1674
1675         /* Poll for TDMA enable/disable completion */
1676         do {
1677                 reg = tdma_readl(priv, TDMA_STATUS);
1678                 if (!!(reg & TDMA_DISABLED) == !enable)
1679                         return 0;
1680
1681                 usleep_range(1000, 2000);
1682         } while (timeout-- > 0);
1683
1684         netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1685
1686         return -ETIMEDOUT;
1687 }
1688
1689 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1690 {
1691         struct bcm_sysport_cb *cb;
1692         u32 reg;
1693         int ret;
1694         int i;
1695
1696         /* Initialize SW view of the RX ring */
1697         priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
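             /* RX descriptors live in the RDMA block itself; rx_bds points at
              * that on-chip register window rather than at memory allocated
              * through the DMA API.
              */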
1698         priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
1699         priv->rx_c_index = 0;
1700         priv->rx_read_ptr = 0;
1701         priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
1702                                 GFP_KERNEL);
1703         if (!priv->rx_cbs) {
1704                 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1705                 return -ENOMEM;
1706         }
1707
1708         for (i = 0; i < priv->num_rx_bds; i++) {
1709                 cb = priv->rx_cbs + i;
1710                 cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
1711         }
1712
1713         ret = bcm_sysport_alloc_rx_bufs(priv);
1714         if (ret) {
1715                 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1716                 return ret;
1717         }
1718
1719         /* Initialize HW, ensure RDMA is disabled */
1720         reg = rdma_readl(priv, RDMA_STATUS);
1721         if (!(reg & RDMA_DISABLED))
1722                 rdma_enable_set(priv, 0);
1723
1724         rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1725         rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1726         rdma_writel(priv, 0, RDMA_PROD_INDEX);
1727         rdma_writel(priv, 0, RDMA_CONS_INDEX);
1728         rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1729                           RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
1730         /* Operate the queue in ring mode */
1731         rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1732         rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1733         rdma_writel(priv, 0, RDMA_END_ADDR_HI);
1734         rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
1735
1736         netif_dbg(priv, hw, priv->netdev,
1737                   "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
1738                   priv->num_rx_bds, priv->rx_bds);
1739
1740         return 0;
1741 }
1742
1743 static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1744 {
1745         struct bcm_sysport_cb *cb;
1746         unsigned int i;
1747         u32 reg;
1748
1749         /* Caller should ensure RDMA is disabled */
1750         reg = rdma_readl(priv, RDMA_STATUS);
1751         if (!(reg & RDMA_DISABLED))
1752                 netdev_warn(priv->netdev, "RDMA not stopped!\n");
1753
1754         for (i = 0; i < priv->num_rx_bds; i++) {
1755                 cb = &priv->rx_cbs[i];
1756                 if (dma_unmap_addr(cb, dma_addr))
1757                         dma_unmap_single(&priv->pdev->dev,
1758                                          dma_unmap_addr(cb, dma_addr),
1759                                          RX_BUF_LENGTH, DMA_FROM_DEVICE);
1760                 bcm_sysport_free_cb(cb);
1761         }
1762
1763         kfree(priv->rx_cbs);
1764         priv->rx_cbs = NULL;
1765
1766         netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1767 }
1768
1769 static void bcm_sysport_set_rx_mode(struct net_device *dev)
1770 {
1771         struct bcm_sysport_priv *priv = netdev_priv(dev);
1772         u32 reg;
1773
1774         if (priv->is_lite)
1775                 return;
1776
1777         reg = umac_readl(priv, UMAC_CMD);
1778         if (dev->flags & IFF_PROMISC)
1779                 reg |= CMD_PROMISC;
1780         else
1781                 reg &= ~CMD_PROMISC;
1782         umac_writel(priv, reg, UMAC_CMD);
1783
1784         /* No support for ALLMULTI */
1785         if (dev->flags & IFF_ALLMULTI)
1786                 return;
1787 }
1788
1789 static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1790                                    u32 mask, unsigned int enable)
1791 {
1792         u32 reg;
1793
1794         if (!priv->is_lite) {
1795                 reg = umac_readl(priv, UMAC_CMD);
1796                 if (enable)
1797                         reg |= mask;
1798                 else
1799                         reg &= ~mask;
1800                 umac_writel(priv, reg, UMAC_CMD);
1801         } else {
1802                 reg = gib_readl(priv, GIB_CONTROL);
1803                 if (enable)
1804                         reg |= mask;
1805                 else
1806                         reg &= ~mask;
1807                 gib_writel(priv, reg, GIB_CONTROL);
1808         }
1809
1810         /* UniMAC stops on a packet boundary, wait for a full-sized packet
1811          * to be processed (1 msec).
1812          */
1813         if (enable == 0)
1814                 usleep_range(1000, 2000);
1815 }
1816
1817 static inline void umac_reset(struct bcm_sysport_priv *priv)
1818 {
1819         u32 reg;
1820
1821         if (priv->is_lite)
1822                 return;
1823
1824         reg = umac_readl(priv, UMAC_CMD);
1825         reg |= CMD_SW_RESET;
1826         umac_writel(priv, reg, UMAC_CMD);
1827         udelay(10);
1828         reg = umac_readl(priv, UMAC_CMD);
1829         reg &= ~CMD_SW_RESET;
1830         umac_writel(priv, reg, UMAC_CMD);
1831 }
1832
1833 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
1834                              const unsigned char *addr)
1835 {
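             /* MAC0 holds the first four bytes of the address, MAC1 the last two */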
1836         u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
1837                     addr[3];
1838         u32 mac1 = (addr[4] << 8) | addr[5];
1839
1840         if (!priv->is_lite) {
1841                 umac_writel(priv, mac0, UMAC_MAC0);
1842                 umac_writel(priv, mac1, UMAC_MAC1);
1843         } else {
1844                 gib_writel(priv, mac0, GIB_MAC0);
1845                 gib_writel(priv, mac1, GIB_MAC1);
1846         }
1847 }
1848
1849 static void topctrl_flush(struct bcm_sysport_priv *priv)
1850 {
1851         topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1852         topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1853         mdelay(1);
1854         topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1855         topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1856 }
1857
1858 static int bcm_sysport_change_mac(struct net_device *dev, void *p)
1859 {
1860         struct bcm_sysport_priv *priv = netdev_priv(dev);
1861         struct sockaddr *addr = p;
1862
1863         if (!is_valid_ether_addr(addr->sa_data))
1864                 return -EINVAL;
1865
1866         eth_hw_addr_set(dev, addr->sa_data);
1867
1868         /* interface is disabled, changes to MAC will be reflected on next
1869          * open call
1870          */
1871         if (!netif_running(dev))
1872                 return 0;
1873
1874         umac_set_hw_addr(priv, dev->dev_addr);
1875
1876         return 0;
1877 }
1878
1879 static void bcm_sysport_get_stats64(struct net_device *dev,
1880                                     struct rtnl_link_stats64 *stats)
1881 {
1882         struct bcm_sysport_priv *priv = netdev_priv(dev);
1883         struct bcm_sysport_stats64 *stats64 = &priv->stats64;
1884         unsigned int start;
1885
1886         netdev_stats_to_stats64(stats, &dev->stats);
1887
1888         bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
1889                                     &stats->tx_packets);
1890
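             /* Snapshot the RX counters under the u64_stats sequence so the
              * values stay consistent with the datapath updates.
              */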
1891         do {
1892                 start = u64_stats_fetch_begin(&priv->syncp);
1893                 stats->rx_packets = stats64->rx_packets;
1894                 stats->rx_bytes = stats64->rx_bytes;
1895         } while (u64_stats_fetch_retry(&priv->syncp, start));
1896 }
1897
1898 static void bcm_sysport_netif_start(struct net_device *dev)
1899 {
1900         struct bcm_sysport_priv *priv = netdev_priv(dev);
1901
1902         /* Enable NAPI */
1903         bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
1904         bcm_sysport_init_rx_coalesce(priv);
1905         napi_enable(&priv->napi);
1906
1907         /* Enable RX interrupt and TX ring full interrupt */
1908         intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1909
1910         phy_start(dev->phydev);
1911
1912         /* Enable TX interrupts for the TXQs */
1913         if (!priv->is_lite)
1914                 intrl2_1_mask_clear(priv, 0xffffffff);
1915         else
1916                 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
1917 }
1918
1919 static void rbuf_init(struct bcm_sysport_priv *priv)
1920 {
1921         u32 reg;
1922
1923         reg = rbuf_readl(priv, RBUF_CONTROL);
1924         reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
1925         /* Set a correct RSB format on SYSTEMPORT Lite */
1926         if (priv->is_lite)
1927                 reg &= ~RBUF_RSB_SWAP1;
1928
1929         /* Set a correct RSB format based on host endian */
1930         if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1931                 reg |= RBUF_RSB_SWAP0;
1932         else
1933                 reg &= ~RBUF_RSB_SWAP0;
1934         rbuf_writel(priv, reg, RBUF_CONTROL);
1935 }
1936
1937 static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
1938 {
1939         intrl2_0_mask_set(priv, 0xffffffff);
1940         intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1941         if (!priv->is_lite) {
1942                 intrl2_1_mask_set(priv, 0xffffffff);
1943                 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1944         }
1945 }
1946
1947 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
1948 {
1949         u32 reg;
1950
1951         reg = gib_readl(priv, GIB_CONTROL);
1952         /* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
1953         if (netdev_uses_dsa(priv->netdev)) {
1954                 reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
1955                 reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
1956         }
1957         reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
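             /* 12 byte times is the standard Ethernet inter-packet gap */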
1958         reg |= 12 << GIB_IPG_LEN_SHIFT;
1959         gib_writel(priv, reg, GIB_CONTROL);
1960 }
1961
1962 static int bcm_sysport_open(struct net_device *dev)
1963 {
1964         struct bcm_sysport_priv *priv = netdev_priv(dev);
1965         struct phy_device *phydev;
1966         unsigned int i;
1967         int ret;
1968
1969         clk_prepare_enable(priv->clk);
1970
1971         /* Reset UniMAC */
1972         umac_reset(priv);
1973
1974         /* Flush TX and RX FIFOs at TOPCTRL level */
1975         topctrl_flush(priv);
1976
1977         /* Disable the UniMAC RX/TX */
1978         umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
1979
1980         /* Enable RBUF 4-byte alignment and the Receive Status Block */
1981         rbuf_init(priv);
1982
1983         /* Set maximum frame length */
1984         if (!priv->is_lite)
1985                 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1986         else
1987                 gib_set_pad_extension(priv);
1988
1989         /* Apply features again in case we changed them while interface was
1990          * down
1991          */
1992         bcm_sysport_set_features(dev, dev->features);
1993
1994         /* Set MAC address */
1995         umac_set_hw_addr(priv, dev->dev_addr);
1996
1997         phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1998                                 0, priv->phy_interface);
1999         if (!phydev) {
2000                 netdev_err(dev, "could not attach to PHY\n");
2001                 ret = -ENODEV;
2002                 goto out_clk_disable;
2003         }
2004
2005         /* Indicate that the MAC is responsible for PHY PM */
2006         phydev->mac_managed_pm = true;
2007
2008         /* Reset housekeeping link status */
2009         priv->old_duplex = -1;
2010         priv->old_link = -1;
2011         priv->old_pause = -1;
2012
2013         /* mask all interrupts and request them */
2014         bcm_sysport_mask_all_intrs(priv);
2015
2016         ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
2017         if (ret) {
2018                 netdev_err(dev, "failed to request RX interrupt\n");
2019                 goto out_phy_disconnect;
2020         }
2021
2022         if (!priv->is_lite) {
2023                 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
2024                                   dev->name, dev);
2025                 if (ret) {
2026                         netdev_err(dev, "failed to request TX interrupt\n");
2027                         goto out_free_irq0;
2028                 }
2029         }
2030
2031         /* Initialize both hardware and software ring */
2032         spin_lock_init(&priv->desc_lock);
2033         for (i = 0; i < dev->num_tx_queues; i++) {
2034                 ret = bcm_sysport_init_tx_ring(priv, i);
2035                 if (ret) {
2036                         netdev_err(dev, "failed to initialize TX ring %d\n",
2037                                    i);
2038                         goto out_free_tx_ring;
2039                 }
2040         }
2041
2042         /* Initialize linked-list */
2043         tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2044
2045         /* Initialize RX ring */
2046         ret = bcm_sysport_init_rx_ring(priv);
2047         if (ret) {
2048                 netdev_err(dev, "failed to initialize RX ring\n");
2049                 goto out_free_rx_ring;
2050         }
2051
2052         /* Turn on RDMA */
2053         ret = rdma_enable_set(priv, 1);
2054         if (ret)
2055                 goto out_free_rx_ring;
2056
2057         /* Turn on TDMA */
2058         ret = tdma_enable_set(priv, 1);
2059         if (ret)
2060                 goto out_clear_rx_int;
2061
2062         /* Turn on UniMAC TX/RX */
2063         umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
2064
2065         bcm_sysport_netif_start(dev);
2066
2067         netif_tx_start_all_queues(dev);
2068
2069         return 0;
2070
2071 out_clear_rx_int:
2072         intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
2073 out_free_rx_ring:
2074         bcm_sysport_fini_rx_ring(priv);
2075 out_free_tx_ring:
2076         for (i = 0; i < dev->num_tx_queues; i++)
2077                 bcm_sysport_fini_tx_ring(priv, i);
2078         if (!priv->is_lite)
2079                 free_irq(priv->irq1, dev);
2080 out_free_irq0:
2081         free_irq(priv->irq0, dev);
2082 out_phy_disconnect:
2083         phy_disconnect(phydev);
2084 out_clk_disable:
2085         clk_disable_unprepare(priv->clk);
2086         return ret;
2087 }
2088
2089 static void bcm_sysport_netif_stop(struct net_device *dev)
2090 {
2091         struct bcm_sysport_priv *priv = netdev_priv(dev);
2092
2093         /* stop all software from updating hardware */
2094         netif_tx_disable(dev);
2095         napi_disable(&priv->napi);
2096         cancel_work_sync(&priv->dim.dim.work);
2097         phy_stop(dev->phydev);
2098
2099         /* mask all interrupts */
2100         bcm_sysport_mask_all_intrs(priv);
2101 }
2102
2103 static int bcm_sysport_stop(struct net_device *dev)
2104 {
2105         struct bcm_sysport_priv *priv = netdev_priv(dev);
2106         unsigned int i;
2107         int ret;
2108
2109         bcm_sysport_netif_stop(dev);
2110
2111         /* Disable UniMAC RX */
2112         umac_enable_set(priv, CMD_RX_EN, 0);
2113
2114         ret = tdma_enable_set(priv, 0);
2115         if (ret) {
2116                 netdev_err(dev, "timeout disabling RDMA\n");
2117                 return ret;
2118         }
2119
2120         /* Wait for a maximum packet size to be drained */
2121         usleep_range(2000, 3000);
2122
2123         ret = rdma_enable_set(priv, 0);
2124         if (ret) {
2125                 netdev_err(dev, "timeout disabling TDMA\n");
2126                 return ret;
2127         }
2128
2129         /* Disable UniMAC TX */
2130         umac_enable_set(priv, CMD_TX_EN, 0);
2131
2132         /* Free RX/TX rings SW structures */
2133         for (i = 0; i < dev->num_tx_queues; i++)
2134                 bcm_sysport_fini_tx_ring(priv, i);
2135         bcm_sysport_fini_rx_ring(priv);
2136
2137         free_irq(priv->irq0, dev);
2138         if (!priv->is_lite)
2139                 free_irq(priv->irq1, dev);
2140
2141         /* Disconnect from PHY */
2142         phy_disconnect(dev->phydev);
2143
2144         clk_disable_unprepare(priv->clk);
2145
2146         return 0;
2147 }
2148
2149 static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
2150                                  u64 location)
2151 {
2152         unsigned int index;
2153         u32 reg;
2154
2155         for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2156                 reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2157                 reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
2158                 reg &= RXCHK_BRCM_TAG_CID_MASK;
2159                 if (reg == location)
2160                         return index;
2161         }
2162
2163         return -EINVAL;
2164 }
2165
2166 static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
2167                                 struct ethtool_rxnfc *nfc)
2168 {
2169         int index;
2170
2171         /* This is not a rule that we know about */
2172         index = bcm_sysport_rule_find(priv, nfc->fs.location);
2173         if (index < 0)
2174                 return -EOPNOTSUPP;
2175
2176         nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;
2177
2178         return 0;
2179 }
2180
2181 static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
2182                                 struct ethtool_rxnfc *nfc)
2183 {
2184         unsigned int index;
2185         u32 reg;
2186
2187         /* We cannot match locations greater than what the classification ID
2188          * permits (256 entries)
2189          */
2190         if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
2191                 return -E2BIG;
2192
2193         /* We cannot support flows that are not destined for a wake-up */
2194         if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
2195                 return -EOPNOTSUPP;
2196
2197         index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
2198         if (index >= RXCHK_BRCM_TAG_MAX)
2199                 /* All filters are already in use, we cannot match more rules */
2200                 return -ENOSPC;
2201
2202         /* Location is the classification ID, and index is the position
2203          * within one of our 8 possible filters to be programmed
2204          */
2205         reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
2206         reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
2207         reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
2208         rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
2209         rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
2210
2211         priv->filters_loc[index] = nfc->fs.location;
2212         set_bit(index, priv->filters);
2213
2214         return 0;
2215 }
2216
2217 static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
2218                                 u64 location)
2219 {
2220         int index;
2221
2222         /* This is not a rule that we know about */
2223         index = bcm_sysport_rule_find(priv, location);
2224         if (index < 0)
2225                 return -EOPNOTSUPP;
2226
2227         /* No need to disable this filter if it was enabled; this is
2228          * taken care of at suspend time by bcm_sysport_suspend_to_wol()
2229          */
2230         clear_bit(index, priv->filters);
2231         priv->filters_loc[index] = 0;
2232
2233         return 0;
2234 }
2235
2236 static int bcm_sysport_get_rxnfc(struct net_device *dev,
2237                                  struct ethtool_rxnfc *nfc, u32 *rule_locs)
2238 {
2239         struct bcm_sysport_priv *priv = netdev_priv(dev);
2240         int ret = -EOPNOTSUPP;
2241
2242         switch (nfc->cmd) {
2243         case ETHTOOL_GRXCLSRULE:
2244                 ret = bcm_sysport_rule_get(priv, nfc);
2245                 break;
2246         default:
2247                 break;
2248         }
2249
2250         return ret;
2251 }
2252
2253 static int bcm_sysport_set_rxnfc(struct net_device *dev,
2254                                  struct ethtool_rxnfc *nfc)
2255 {
2256         struct bcm_sysport_priv *priv = netdev_priv(dev);
2257         int ret = -EOPNOTSUPP;
2258
2259         switch (nfc->cmd) {
2260         case ETHTOOL_SRXCLSRLINS:
2261                 ret = bcm_sysport_rule_set(priv, nfc);
2262                 break;
2263         case ETHTOOL_SRXCLSRLDEL:
2264                 ret = bcm_sysport_rule_del(priv, nfc->fs.location);
2265                 break;
2266         default:
2267                 break;
2268         }
2269
2270         return ret;
2271 }
2272
2273 static const struct ethtool_ops bcm_sysport_ethtool_ops = {
2274         .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2275                                      ETHTOOL_COALESCE_MAX_FRAMES |
2276                                      ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
2277         .get_drvinfo            = bcm_sysport_get_drvinfo,
2278         .get_msglevel           = bcm_sysport_get_msglvl,
2279         .set_msglevel           = bcm_sysport_set_msglvl,
2280         .get_link               = ethtool_op_get_link,
2281         .get_strings            = bcm_sysport_get_strings,
2282         .get_ethtool_stats      = bcm_sysport_get_stats,
2283         .get_sset_count         = bcm_sysport_get_sset_count,
2284         .get_wol                = bcm_sysport_get_wol,
2285         .set_wol                = bcm_sysport_set_wol,
2286         .get_coalesce           = bcm_sysport_get_coalesce,
2287         .set_coalesce           = bcm_sysport_set_coalesce,
2288         .get_link_ksettings     = phy_ethtool_get_link_ksettings,
2289         .set_link_ksettings     = phy_ethtool_set_link_ksettings,
2290         .get_rxnfc              = bcm_sysport_get_rxnfc,
2291         .set_rxnfc              = bcm_sysport_set_rxnfc,
2292 };
2293
2294 static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
2295                                     struct net_device *sb_dev)
2296 {
2297         struct bcm_sysport_priv *priv = netdev_priv(dev);
2298         u16 queue = skb_get_queue_mapping(skb);
2299         struct bcm_sysport_tx_ring *tx_ring;
2300         unsigned int q, port;
2301
2302         if (!netdev_uses_dsa(dev))
2303                 return netdev_pick_tx(dev, skb, NULL);
2304
2305         /* DSA tagging layer will have configured the correct queue */
2306         q = BRCM_TAG_GET_QUEUE(queue);
2307         port = BRCM_TAG_GET_PORT(queue);
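             /* ring_map is indexed by (switch queue + port * queues per port),
              * mirroring the mapping established in bcm_sysport_map_queues().
              */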
2308         tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
2309
2310         if (unlikely(!tx_ring))
2311                 return netdev_pick_tx(dev, skb, NULL);
2312
2313         return tx_ring->index;
2314 }
2315
2316 static const struct net_device_ops bcm_sysport_netdev_ops = {
2317         .ndo_start_xmit         = bcm_sysport_xmit,
2318         .ndo_tx_timeout         = bcm_sysport_tx_timeout,
2319         .ndo_open               = bcm_sysport_open,
2320         .ndo_stop               = bcm_sysport_stop,
2321         .ndo_set_features       = bcm_sysport_set_features,
2322         .ndo_set_rx_mode        = bcm_sysport_set_rx_mode,
2323         .ndo_set_mac_address    = bcm_sysport_change_mac,
2324 #ifdef CONFIG_NET_POLL_CONTROLLER
2325         .ndo_poll_controller    = bcm_sysport_poll_controller,
2326 #endif
2327         .ndo_get_stats64        = bcm_sysport_get_stats64,
2328         .ndo_select_queue       = bcm_sysport_select_queue,
2329 };
2330
2331 static int bcm_sysport_map_queues(struct net_device *dev,
2332                                   struct net_device *slave_dev)
2333 {
2334         struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
2335         struct bcm_sysport_priv *priv = netdev_priv(dev);
2336         struct bcm_sysport_tx_ring *ring;
2337         unsigned int num_tx_queues;
2338         unsigned int q, qp, port;
2339
2340         /* We cannot set up queue inspection for switches that are not
2341          * directly attached
2342          */
2343         if (dp->ds->index)
2344                 return 0;
2345
2346         port = dp->index;
2347
2348         /* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
2349          * 1:1 mapping, only a 2:1 mapping. By halving the number of per-port
2350          * (slave_dev) network device queues, we achieve just that. This needs
2351          * to happen now, before any slave network device is used, so that it
2352          * accurately reflects the number of real TX queues.
2353          */
2354         if (priv->is_lite)
2355                 netif_set_real_num_tx_queues(slave_dev,
2356                                              slave_dev->num_tx_queues / 2);
2357
2358         num_tx_queues = slave_dev->real_num_tx_queues;
2359
2360         if (priv->per_port_num_tx_queues &&
2361             priv->per_port_num_tx_queues != num_tx_queues)
2362                 netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
2363
2364         priv->per_port_num_tx_queues = num_tx_queues;
2365
2366         for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
2367              q++) {
2368                 ring = &priv->tx_rings[q];
2369
2370                 if (ring->inspect)
2371                         continue;
2372
2373                 /* Just remember the mapping; the actual programming is
2374                  * done in bcm_sysport_init_tx_ring()
2375                  */
2376                 ring->switch_queue = qp;
2377                 ring->switch_port = port;
2378                 ring->inspect = true;
2379                 priv->ring_map[qp + port * num_tx_queues] = ring;
2380                 qp++;
2381         }
2382
2383         return 0;
2384 }
2385
2386 static int bcm_sysport_unmap_queues(struct net_device *dev,
2387                                     struct net_device *slave_dev)
2388 {
2389         struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
2390         struct bcm_sysport_priv *priv = netdev_priv(dev);
2391         struct bcm_sysport_tx_ring *ring;
2392         unsigned int num_tx_queues;
2393         unsigned int q, qp, port;
2394
2395         port = dp->index;
2396
2397         num_tx_queues = slave_dev->real_num_tx_queues;
2398
2399         for (q = 0; q < dev->num_tx_queues; q++) {
2400                 ring = &priv->tx_rings[q];
2401
2402                 if (ring->switch_port != port)
2403                         continue;
2404
2405                 if (!ring->inspect)
2406                         continue;
2407
2408                 ring->inspect = false;
2409                 qp = ring->switch_queue;
2410                 priv->ring_map[qp + port * num_tx_queues] = NULL;
2411         }
2412
2413         return 0;
2414 }
2415
2416 static int bcm_sysport_netdevice_event(struct notifier_block *nb,
2417                                        unsigned long event, void *ptr)
2418 {
2419         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2420         struct netdev_notifier_changeupper_info *info = ptr;
2421         struct bcm_sysport_priv *priv;
2422         int ret = 0;
2423
2424         priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
2425         if (priv->netdev != dev)
2426                 return NOTIFY_DONE;
2427
2428         switch (event) {
2429         case NETDEV_CHANGEUPPER:
2430                 if (dev->netdev_ops != &bcm_sysport_netdev_ops)
2431                         return NOTIFY_DONE;
2432
2433                 if (!dsa_user_dev_check(info->upper_dev))
2434                         return NOTIFY_DONE;
2435
2436                 if (info->linking)
2437                         ret = bcm_sysport_map_queues(dev, info->upper_dev);
2438                 else
2439                         ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
2440                 break;
2441         }
2442
2443         return notifier_from_errno(ret);
2444 }
2445
2446 #define REV_FMT "v%2x.%02x"
2447
2448 static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
2449         [SYSTEMPORT] = {
2450                 .is_lite = false,
2451                 .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
2452         },
2453         [SYSTEMPORT_LITE] = {
2454                 .is_lite = true,
2455                 .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
2456         },
2457 };
2458
2459 static const struct of_device_id bcm_sysport_of_match[] = {
2460         { .compatible = "brcm,systemportlite-v1.00",
2461           .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
2462         { .compatible = "brcm,systemport-v1.00",
2463           .data = &bcm_sysport_params[SYSTEMPORT] },
2464         { .compatible = "brcm,systemport",
2465           .data = &bcm_sysport_params[SYSTEMPORT] },
2466         { /* sentinel */ }
2467 };
2468 MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
2469
2470 static int bcm_sysport_probe(struct platform_device *pdev)
2471 {
2472         const struct bcm_sysport_hw_params *params;
2473         const struct of_device_id *of_id = NULL;
2474         struct bcm_sysport_priv *priv;
2475         struct device_node *dn;
2476         struct net_device *dev;
2477         u32 txq, rxq;
2478         int ret;
2479
2480         dn = pdev->dev.of_node;
2481         of_id = of_match_node(bcm_sysport_of_match, dn);
2482         if (!of_id || !of_id->data)
2483                 return -EINVAL;
2484
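             /* Descriptors can carry a 40-bit DMA address; fall back to a
              * 32-bit mask if the platform cannot support that.
              */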
2485         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
2486         if (ret)
2487                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2488         if (ret) {
2489                 dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
2490                 return ret;
2491         }
2492
2493         /* Fairly quickly we need to know the type of adapter we have */
2494         params = of_id->data;
2495
2496         /* Read the Transmit/Receive Queue properties */
2497         if (of_property_read_u32(dn, "systemport,num-txq", &txq))
2498                 txq = TDMA_NUM_RINGS;
2499         if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
2500                 rxq = 1;
2501
2502         /* Sanity check the number of transmit queues */
2503         if (!txq || txq > TDMA_NUM_RINGS)
2504                 return -EINVAL;
2505
2506         dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
2507         if (!dev)
2508                 return -ENOMEM;
2509
2510         /* Initialize private members */
2511         priv = netdev_priv(dev);
2512
2513         priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
2514         if (IS_ERR(priv->clk)) {
2515                 ret = PTR_ERR(priv->clk);
2516                 goto err_free_netdev;
2517         }
2518
2519         /* Allocate number of TX rings */
2520         priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
2521                                       sizeof(struct bcm_sysport_tx_ring),
2522                                       GFP_KERNEL);
2523         if (!priv->tx_rings) {
2524                 ret = -ENOMEM;
2525                 goto err_free_netdev;
2526         }
2527
2528         priv->is_lite = params->is_lite;
2529         priv->num_rx_desc_words = params->num_rx_desc_words;
2530
2531         priv->irq0 = platform_get_irq(pdev, 0);
2532         if (!priv->is_lite) {
2533                 priv->irq1 = platform_get_irq(pdev, 1);
2534                 priv->wol_irq = platform_get_irq_optional(pdev, 2);
2535         } else {
2536                 priv->wol_irq = platform_get_irq_optional(pdev, 1);
2537         }
2538         if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
2539                 ret = -EINVAL;
2540                 goto err_free_netdev;
2541         }
2542
2543         priv->base = devm_platform_ioremap_resource(pdev, 0);
2544         if (IS_ERR(priv->base)) {
2545                 ret = PTR_ERR(priv->base);
2546                 goto err_free_netdev;
2547         }
2548
2549         priv->netdev = dev;
2550         priv->pdev = pdev;
2551
2552         ret = of_get_phy_mode(dn, &priv->phy_interface);
2553         /* Default to GMII interface mode */
2554         if (ret)
2555                 priv->phy_interface = PHY_INTERFACE_MODE_GMII;
2556
2557         /* In the case of a fixed PHY, the DT node associated
2558          * to the PHY is the Ethernet MAC DT node.
2559          */
2560         if (of_phy_is_fixed_link(dn)) {
2561                 ret = of_phy_register_fixed_link(dn);
2562                 if (ret) {
2563                         dev_err(&pdev->dev, "failed to register fixed PHY\n");
2564                         goto err_free_netdev;
2565                 }
2566
2567                 priv->phy_dn = dn;
2568         }
2569
2570         /* Initialize netdevice members */
2571         ret = of_get_ethdev_address(dn, dev);
2572         if (ret) {
2573                 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
2574                 eth_hw_addr_random(dev);
2575         }
2576
2577         SET_NETDEV_DEV(dev, &pdev->dev);
2578         dev_set_drvdata(&pdev->dev, dev);
2579         dev->ethtool_ops = &bcm_sysport_ethtool_ops;
2580         dev->netdev_ops = &bcm_sysport_netdev_ops;
2581         netif_napi_add(dev, &priv->napi, bcm_sysport_poll);
2582
2583         dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
2584                          NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2585                          NETIF_F_HW_VLAN_CTAG_TX;
2586         dev->hw_features |= dev->features;
2587         dev->vlan_features |= dev->features;
2588         dev->max_mtu = UMAC_MAX_MTU_SIZE;
2589
2590         /* Request the WOL interrupt and advertise suspend if available */
2591         priv->wol_irq_disabled = 1;
2592         ret = devm_request_irq(&pdev->dev, priv->wol_irq,
2593                                bcm_sysport_wol_isr, 0, dev->name, priv);
2594         if (!ret)
2595                 device_set_wakeup_capable(&pdev->dev, 1);
2596
2597         priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
2598         if (IS_ERR(priv->wol_clk)) {
2599                 ret = PTR_ERR(priv->wol_clk);
2600                 goto err_deregister_fixed_link;
2601         }
2602
2603         /* Set the needed headroom once and for all */
2604         BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
2605         dev->needed_headroom += sizeof(struct bcm_tsb);
2606
2607         /* libphy will adjust the link state accordingly */
2608         netif_carrier_off(dev);
2609
2610         priv->rx_max_coalesced_frames = 1;
2611         u64_stats_init(&priv->syncp);
2612
2613         priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;
2614
2615         ret = register_netdevice_notifier(&priv->netdev_notifier);
2616         if (ret) {
2617                 dev_err(&pdev->dev, "failed to register DSA notifier\n");
2618                 goto err_deregister_fixed_link;
2619         }
2620
2621         ret = register_netdev(dev);
2622         if (ret) {
2623                 dev_err(&pdev->dev, "failed to register net_device\n");
2624                 goto err_deregister_notifier;
2625         }
2626
2627         clk_prepare_enable(priv->clk);
2628
2629         priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
2630         dev_info(&pdev->dev,
2631                  "Broadcom SYSTEMPORT%s " REV_FMT
2632                  " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
2633                  priv->is_lite ? " Lite" : "",
2634                  (priv->rev >> 8) & 0xff, priv->rev & 0xff,
2635                  priv->irq0, priv->irq1, txq, rxq);
2636
2637         clk_disable_unprepare(priv->clk);
2638
2639         return 0;
2640
2641 err_deregister_notifier:
2642         unregister_netdevice_notifier(&priv->netdev_notifier);
2643 err_deregister_fixed_link:
2644         if (of_phy_is_fixed_link(dn))
2645                 of_phy_deregister_fixed_link(dn);
2646 err_free_netdev:
2647         free_netdev(dev);
2648         return ret;
2649 }
2650
2651 static void bcm_sysport_remove(struct platform_device *pdev)
2652 {
2653         struct net_device *dev = dev_get_drvdata(&pdev->dev);
2654         struct bcm_sysport_priv *priv = netdev_priv(dev);
2655         struct device_node *dn = pdev->dev.of_node;
2656
2657         /* Not much to do, ndo_close has been called
2658          * and we use managed allocations
2659          */
2660         unregister_netdevice_notifier(&priv->netdev_notifier);
2661         unregister_netdev(dev);
2662         if (of_phy_is_fixed_link(dn))
2663                 of_phy_deregister_fixed_link(dn);
2664         free_netdev(dev);
2665         dev_set_drvdata(&pdev->dev, NULL);
2666 }
2667
2668 static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2669 {
2670         struct net_device *ndev = priv->netdev;
2671         unsigned int timeout = 1000;
2672         unsigned int index, i = 0;
2673         u32 reg;
2674
2675         reg = umac_readl(priv, UMAC_MPD_CTRL);
2676         if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
2677                 reg |= MPD_EN;
2678         reg &= ~PSW_EN;
2679         if (priv->wolopts & WAKE_MAGICSECURE) {
2680                 /* Program the SecureOn password */
2681                 umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
2682                             UMAC_PSW_MS);
2683                 umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
2684                             UMAC_PSW_LS);
2685                 reg |= PSW_EN;
2686         }
2687         umac_writel(priv, reg, UMAC_MPD_CTRL);
2688
2689         if (priv->wolopts & WAKE_FILTER) {
2690                 /* Turn on ACPI matching to steal packets from RBUF */
2691                 reg = rbuf_readl(priv, RBUF_CONTROL);
2692                 if (priv->is_lite)
2693                         reg |= RBUF_ACPI_EN_LITE;
2694                 else
2695                         reg |= RBUF_ACPI_EN;
2696                 rbuf_writel(priv, reg, RBUF_CONTROL);
2697
2698                 /* Enable RXCHK, active filters and Broadcom tag matching */
2699                 reg = rxchk_readl(priv, RXCHK_CONTROL);
2700                 reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
2701                          RXCHK_BRCM_TAG_MATCH_SHIFT);
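                     /* The match enable bits are packed consecutively, so count
                      * the active filters with i rather than reusing the
                      * (possibly sparse) filter bit index.
                      */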
2702                 for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
2703                         reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
2704                         i++;
2705                 }
2706                 reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
2707                 rxchk_writel(priv, reg, RXCHK_CONTROL);
2708         }
2709
2710         /* Make sure RBUF entered WoL mode as a result */
2711         do {
2712                 reg = rbuf_readl(priv, RBUF_STATUS);
2713                 if (reg & RBUF_WOL_MODE)
2714                         break;
2715
2716                 udelay(10);
2717         } while (timeout-- > 0);
2718
2719         /* Do not leave the UniMAC RBUF matching only MPD packets */
2720         if (!timeout) {
2721                 mpd_enable_set(priv, false);
2722                 netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
2723                 return -ETIMEDOUT;
2724         }
2725
2726         /* UniMAC receive needs to be turned on */
2727         umac_enable_set(priv, CMD_RX_EN, 1);
2728
2729         netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2730
2731         return 0;
2732 }
2733
2734 static int __maybe_unused bcm_sysport_suspend(struct device *d)
2735 {
2736         struct net_device *dev = dev_get_drvdata(d);
2737         struct bcm_sysport_priv *priv = netdev_priv(dev);
2738         unsigned int i;
2739         int ret = 0;
2740         u32 reg;
2741
2742         if (!netif_running(dev))
2743                 return 0;
2744
2745         netif_device_detach(dev);
2746
2747         bcm_sysport_netif_stop(dev);
2748
2749         phy_suspend(dev->phydev);
2750
2751         /* Disable UniMAC RX */
2752         umac_enable_set(priv, CMD_RX_EN, 0);
2753
2754         ret = rdma_enable_set(priv, 0);
2755         if (ret) {
2756                 netdev_err(dev, "RDMA timeout!\n");
2757                 return ret;
2758         }
2759
2760         /* Disable RXCHK if enabled */
2761         if (priv->rx_chk_en) {
2762                 reg = rxchk_readl(priv, RXCHK_CONTROL);
2763                 reg &= ~RXCHK_EN;
2764                 rxchk_writel(priv, reg, RXCHK_CONTROL);
2765         }
2766
2767         /* Flush RX pipe */
2768         if (!priv->wolopts)
2769                 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
2770
2771         ret = tdma_enable_set(priv, 0);
2772         if (ret) {
2773                 netdev_err(dev, "TDMA timeout!\n");
2774                 return ret;
2775         }
2776
2777         /* Wait for a packet boundary */
2778         usleep_range(2000, 3000);
2779
2780         umac_enable_set(priv, CMD_TX_EN, 0);
2781
2782         topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
2783
2784         /* Free RX/TX rings SW structures */
2785         for (i = 0; i < dev->num_tx_queues; i++)
2786                 bcm_sysport_fini_tx_ring(priv, i);
2787         bcm_sysport_fini_rx_ring(priv);
2788
2789         /* Get prepared for Wake-on-LAN */
2790         if (device_may_wakeup(d) && priv->wolopts) {
2791                 clk_prepare_enable(priv->wol_clk);
2792                 ret = bcm_sysport_suspend_to_wol(priv);
2793         }
2794
2795         clk_disable_unprepare(priv->clk);
2796
2797         return ret;
2798 }
2799
2800 static int __maybe_unused bcm_sysport_resume(struct device *d)
2801 {
2802         struct net_device *dev = dev_get_drvdata(d);
2803         struct bcm_sysport_priv *priv = netdev_priv(dev);
2804         unsigned int i;
2805         int ret;
2806
2807         if (!netif_running(dev))
2808                 return 0;
2809
2810         clk_prepare_enable(priv->clk);
2811         if (priv->wolopts)
2812                 clk_disable_unprepare(priv->wol_clk);
2813
2814         umac_reset(priv);
2815
2816         /* Disable the UniMAC RX/TX */
2817         umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
2818
2819         /* We may have been suspended and never received a WOL event that
2820          * would turn off MPD detection, take care of that now
2821          */
2822         bcm_sysport_resume_from_wol(priv);
2823
2824         /* Initialize both hardware and software ring */
2825         for (i = 0; i < dev->num_tx_queues; i++) {
2826                 ret = bcm_sysport_init_tx_ring(priv, i);
2827                 if (ret) {
2828                         netdev_err(dev, "failed to initialize TX ring %d\n",
2829                                    i);
2830                         goto out_free_tx_rings;
2831                 }
2832         }
2833
2834         /* Initialize linked-list */
2835         tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2836
2837         /* Initialize RX ring */
2838         ret = bcm_sysport_init_rx_ring(priv);
2839         if (ret) {
2840                 netdev_err(dev, "failed to initialize RX ring\n");
2841                 goto out_free_rx_ring;
2842         }
2843
2844         /* RX pipe enable */
2845         topctrl_writel(priv, 0, RX_FLUSH_CNTL);
2846
2847         ret = rdma_enable_set(priv, 1);
2848         if (ret) {
2849                 netdev_err(dev, "failed to enable RDMA\n");
2850                 goto out_free_rx_ring;
2851         }
2852
2853         /* Restore enabled features */
2854         bcm_sysport_set_features(dev, dev->features);
2855
2856         rbuf_init(priv);
2857
2858         /* Set maximum frame length */
2859         if (!priv->is_lite)
2860                 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2861         else
2862                 gib_set_pad_extension(priv);
2863
2864         /* Set MAC address */
2865         umac_set_hw_addr(priv, dev->dev_addr);
2866
2867         umac_enable_set(priv, CMD_RX_EN, 1);
2868
2869         /* TX pipe enable */
2870         topctrl_writel(priv, 0, TX_FLUSH_CNTL);
2871
2872         umac_enable_set(priv, CMD_TX_EN, 1);
2873
2874         ret = tdma_enable_set(priv, 1);
2875         if (ret) {
2876                 netdev_err(dev, "TDMA timeout!\n");
2877                 goto out_free_rx_ring;
2878         }
2879
2880         phy_resume(dev->phydev);
2881
2882         bcm_sysport_netif_start(dev);
2883
2884         netif_device_attach(dev);
2885
2886         return 0;
2887
2888 out_free_rx_ring:
2889         bcm_sysport_fini_rx_ring(priv);
2890 out_free_tx_rings:
2891         for (i = 0; i < dev->num_tx_queues; i++)
2892                 bcm_sysport_fini_tx_ring(priv, i);
2893         clk_disable_unprepare(priv->clk);
2894         return ret;
2895 }
2896
2897 static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
2898                 bcm_sysport_suspend, bcm_sysport_resume);
2899
2900 static struct platform_driver bcm_sysport_driver = {
2901         .probe  = bcm_sysport_probe,
2902         .remove_new = bcm_sysport_remove,
2903         .driver =  {
2904                 .name = "brcm-systemport",
2905                 .of_match_table = bcm_sysport_of_match,
2906                 .pm = &bcm_sysport_pm_ops,
2907         },
2908 };
2909 module_platform_driver(bcm_sysport_driver);
2910
2911 MODULE_AUTHOR("Broadcom Corporation");
2912 MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
2913 MODULE_ALIAS("platform:brcm-systemport");
2914 MODULE_LICENSE("GPL");