net: rswitch: Fix return value in rswitch_start_xmit()
[linux-2.6-microblaze.git] / drivers / net / ethernet / renesas / rswitch.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Renesas Ethernet Switch device driver
3  *
4  * Copyright (C) 2022 Renesas Electronics Corporation
5  */
6
7 #include <linux/clk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/err.h>
10 #include <linux/etherdevice.h>
11 #include <linux/iopoll.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/of.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/phy/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/sys_soc.h>
26
27 #include "rswitch.h"
28
/* Busy-poll register @offs until (value & @mask) == @expected.
 * Polls every 1 us for at most RSWITCH_TIMEOUT_US; returns 0 on success or
 * -ETIMEDOUT from readl_poll_timeout_atomic() on expiry.  Safe in atomic
 * context (no sleeping).
 */
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
	u32 val;

	return readl_poll_timeout_atomic(addr + offs, val, (val & mask) == expected,
					 1, RSWITCH_TIMEOUT_US);
}
36
/* Read-modify-write helper: clear the @clear bits, then set the @set bits
 * of register @reg.  Not atomic versus concurrent writers; callers are
 * expected to provide their own serialization where needed.
 */
static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
	iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}
41
/* Common Agent block (COMA) */

/* Pulse the software reset: assert RRC_RR, then release it via RRC_RR_CLR. */
static void rswitch_reset(struct rswitch_private *priv)
{
	iowrite32(RRC_RR, priv->addr + RRC);
	iowrite32(RRC_RR_CLR, priv->addr + RRC);
}
48
/* Enable the switch core clock together with the default agent clock bits. */
static void rswitch_clock_enable(struct rswitch_private *priv)
{
	iowrite32(RCEC_ACE_DEFAULT | RCEC_RCE, priv->addr + RCEC);
}
53
/* Disable the switch core clock via the dedicated clock-disable register. */
static void rswitch_clock_disable(struct rswitch_private *priv)
{
	iowrite32(RCDC_RCD, priv->addr + RCDC);
}
58
59 static bool rswitch_agent_clock_is_enabled(void __iomem *coma_addr, int port)
60 {
61         u32 val = ioread32(coma_addr + RCEC);
62
63         if (val & RCEC_RCE)
64                 return (val & BIT(port)) ? true : false;
65         else
66                 return false;
67 }
68
/* Enable or disable the agent clock for @port.
 * Enable and disable use separate registers (RCEC vs RCDC), so the
 * read-modify-write targets a different register in each branch; enabling
 * also (re)asserts the global RCEC_RCE bit.
 */
static void rswitch_agent_clock_ctrl(void __iomem *coma_addr, int port, int enable)
{
	u32 val;

	if (enable) {
		val = ioread32(coma_addr + RCEC);
		iowrite32(val | RCEC_RCE | BIT(port), coma_addr + RCEC);
	} else {
		val = ioread32(coma_addr + RCDC);
		iowrite32(val | BIT(port), coma_addr + RCDC);
	}
}
81
/* Initialize the buffer pool.
 * If CABPIRM_BPR already reports "ready" the pool was initialized earlier and
 * nothing is done.  Otherwise kick the init (CABPIRM_BPIOG) and poll until
 * the ready bit comes up.  Returns 0 on success or a -ETIMEDOUT from the
 * register wait.
 */
static int rswitch_bpool_config(struct rswitch_private *priv)
{
	u32 val;

	val = ioread32(priv->addr + CABPIRM);
	if (val & CABPIRM_BPR)
		return 0;

	iowrite32(CABPIRM_BPIOG, priv->addr + CABPIRM);

	return rswitch_reg_wait(priv->addr, CABPIRM, CABPIRM_BPR, CABPIRM_BPR);
}
94
/* One-shot COMA setup: program the buffer-pool flow-control register. */
static void rswitch_coma_init(struct rswitch_private *priv)
{
	iowrite32(CABPPFLC_INIT_VALUE, priv->addr + CABPPFLC0);
}
99
/* R-Switch-2 block (TOP) */

/* Route queue interrupts to the GWCA: for each hardware queue, write the
 * message-group selector (i / 16) into the GWCA's byte lane of TPEMIMC7.
 */
static void rswitch_top_init(struct rswitch_private *priv)
{
	int i;

	for (i = 0; i < RSWITCH_MAX_NUM_QUEUES; i++)
		iowrite32((i / 16) << (GWCA_INDEX * 8), priv->addr + TPEMIMC7(i));
}
108
/* Forwarding engine block (MFWD) */

/* Configure the forwarding engine:
 * - each Ethernet port forwards only to the GWCA, using its own RX queue;
 * - the GWCA forwards to all Ethernet ports.
 * Registers are first cleared, then programmed, so stale forwarding state
 * from a previous run cannot leak through.
 */
static void rswitch_fwd_init(struct rswitch_private *priv)
{
	int i;

	/* For ETHA */
	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(i));
		iowrite32(0, priv->addr + FWPBFC(i));
	}

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		/* Steer each port's traffic into its device's RX queue ... */
		iowrite32(priv->rdev[i]->rx_queue->index,
			  priv->addr + FWPBFCSDC(GWCA_INDEX, i));
		/* ... and allow forwarding only towards the GWCA. */
		iowrite32(BIT(priv->gwca.index), priv->addr + FWPBFC(i));
	}

	/* For GWCA */
	iowrite32(FWPC0_DEFAULT, priv->addr + FWPC0(priv->gwca.index));
	iowrite32(FWPC1_DDE, priv->addr + FWPC1(priv->gwca.index));
	/* Clear first, then allow the GWCA to forward to every port. */
	iowrite32(0, priv->addr + FWPBFC(priv->gwca.index));
	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}
132
/* Gateway CPU agent block (GWCA) */

/* Switch the GWCA into @mode.
 * The agent clock must be running to accept the mode change, so it is
 * enabled on demand first.  The new mode is written to GWMC and then GWMS is
 * polled until the operating state matches.  When the requested mode is
 * DISABLE, the agent clock is gated again afterwards to save power.
 * Returns 0 on success or a negative error from the register wait.
 */
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
				    enum rswitch_gwca_mode mode)
{
	int ret;

	if (!rswitch_agent_clock_is_enabled(priv->addr, priv->gwca.index))
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 1);

	iowrite32(mode, priv->addr + GWMC);

	ret = rswitch_reg_wait(priv->addr, GWMS, GWMS_OPS_MASK, mode);

	if (mode == GWMC_OPC_DISABLE)
		rswitch_agent_clock_ctrl(priv->addr, priv->gwca.index, 0);

	return ret;
}
151
/* Reset the GWCA multicast table: start the init and wait for completion. */
static int rswitch_gwca_mcast_table_reset(struct rswitch_private *priv)
{
	iowrite32(GWMTIRM_MTIOG, priv->addr + GWMTIRM);

	return rswitch_reg_wait(priv->addr, GWMTIRM, GWMTIRM_MTR, GWMTIRM_MTR);
}
158
/* Reset the GWCA AXI RAM: start the init and wait for the ready flag. */
static int rswitch_gwca_axi_ram_reset(struct rswitch_private *priv)
{
	iowrite32(GWARIRM_ARIOG, priv->addr + GWARIRM);

	return rswitch_reg_wait(priv->addr, GWARIRM, GWARIRM_ARR, GWARIRM_ARR);
}
165
166 static bool rswitch_is_any_data_irq(struct rswitch_private *priv, u32 *dis, bool tx)
167 {
168         u32 *mask = tx ? priv->gwca.tx_irq_bits : priv->gwca.rx_irq_bits;
169         int i;
170
171         for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
172                 if (dis[i] & mask[i])
173                         return true;
174         }
175
176         return false;
177 }
178
/* Snapshot the data-IRQ status into @dis, masked by the enable registers so
 * only interrupts that are both raised (GWDIS) and enabled (GWDIE) remain.
 */
static void rswitch_get_data_irq_status(struct rswitch_private *priv, u32 *dis)
{
	int i;

	for (i = 0; i < RSWITCH_NUM_IRQ_REGS; i++) {
		dis[i] = ioread32(priv->addr + GWDIS(i));
		dis[i] &= ioread32(priv->addr + GWDIE(i));
	}
}
188
/* Enable or disable the data interrupt of queue @index.
 * Enable (GWDIE) and disable (GWDID) are separate write-1-to-act registers,
 * so a plain single-bit write suffices — no read-modify-write needed.
 */
static void rswitch_enadis_data_irq(struct rswitch_private *priv, int index, bool enable)
{
	u32 offs = enable ? GWDIE(index / 32) : GWDID(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}
195
/* Acknowledge (clear) the pending data interrupt of queue @index by writing
 * its bit to the corresponding GWDIS status register.
 */
static void rswitch_ack_data_irq(struct rswitch_private *priv, int index)
{
	u32 offs = GWDIS(index / 32);

	iowrite32(BIT(index % 32), priv->addr + offs);
}
202
203 static int rswitch_next_queue_index(struct rswitch_gwca_queue *gq, bool cur, int num)
204 {
205         int index = cur ? gq->cur : gq->dirty;
206
207         if (index + num >= gq->ring_size)
208                 index = (index + num) % gq->ring_size;
209         else
210                 index += num;
211
212         return index;
213 }
214
215 static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
216 {
217         if (gq->cur >= gq->dirty)
218                 return gq->cur - gq->dirty;
219         else
220                 return gq->ring_size - gq->dirty + gq->cur;
221 }
222
223 static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
224 {
225         struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];
226
227         if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
228                 return true;
229
230         return false;
231 }
232
/* Populate @num skb slots of @gq starting at @start_index (wrapping at the
 * ring boundary).  Slots that already hold an skb are kept as-is.  Each skb
 * is sized for a full packet plus alignment slack.
 * On allocation failure every skb allocated by THIS call is freed again and
 * its slot reset to NULL (pre-existing skbs are untouched), and -ENOMEM is
 * returned.  Returns 0 on success.
 */
static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
					int start_index, int num)
{
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		if (gq->skbs[index])
			continue;
		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
		if (!gq->skbs[index])
			goto err;
	}

	return 0;

err:
	/* Unwind only the slots this invocation filled (note: this also frees
	 * slots that were skipped above because they were pre-populated; in
	 * practice callers pass ranges that were empty — see rswitch_rx()).
	 */
	for (i--; i >= 0; i--) {
		index = (i + start_index) % gq->ring_size;
		dev_kfree_skb(gq->skbs[index]);
		gq->skbs[index] = NULL;
	}

	return -ENOMEM;
}
259
/* Release everything rswitch_gwca_queue_alloc() set up for @gq: the
 * descriptor ring (one extra entry for the link descriptor), any remaining
 * RX skbs, and the skb pointer array.  Freed pointers are NULLed so a
 * repeated call is harmless (dma_free_coherent() and kfree() accept NULL).
 */
static void rswitch_gwca_queue_free(struct net_device *ndev,
				    struct rswitch_gwca_queue *gq)
{
	int i;

	if (!gq->dir_tx) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_ts_desc) *
				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
		gq->rx_ring = NULL;

		/* dev_kfree_skb() is a no-op for NULL slots. */
		for (i = 0; i < gq->ring_size; i++)
			dev_kfree_skb(gq->skbs[i]);
	} else {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(struct rswitch_ext_desc) *
				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
		gq->tx_ring = NULL;
	}

	kfree(gq->skbs);
	gq->skbs = NULL;
}
283
/* Free the timestamp descriptor ring allocated by
 * rswitch_gwca_ts_queue_alloc() (ring_size + 1 entries, the extra one being
 * the link descriptor) and clear the pointer.
 */
static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;

	dma_free_coherent(&priv->pdev->dev,
			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
			  gq->ts_ring, gq->ring_dma);
	gq->ts_ring = NULL;
}
293
294 static int rswitch_gwca_queue_alloc(struct net_device *ndev,
295                                     struct rswitch_private *priv,
296                                     struct rswitch_gwca_queue *gq,
297                                     bool dir_tx, int ring_size)
298 {
299         int i, bit;
300
301         gq->dir_tx = dir_tx;
302         gq->ring_size = ring_size;
303         gq->ndev = ndev;
304
305         gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
306         if (!gq->skbs)
307                 return -ENOMEM;
308
309         if (!dir_tx) {
310                 rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
311
312                 gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
313                                                  sizeof(struct rswitch_ext_ts_desc) *
314                                                  (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
315         } else {
316                 gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
317                                                  sizeof(struct rswitch_ext_desc) *
318                                                  (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
319         }
320
321         if (!gq->rx_ring && !gq->tx_ring)
322                 goto out;
323
324         i = gq->index / 32;
325         bit = BIT(gq->index % 32);
326         if (dir_tx)
327                 priv->gwca.tx_irq_bits[i] |= bit;
328         else
329                 priv->gwca.rx_irq_bits[i] |= bit;
330
331         return 0;
332
333 out:
334         rswitch_gwca_queue_free(ndev, gq);
335
336         return -ENOMEM;
337 }
338
/* Store a 40-bit DMA address into a descriptor: low 32 bits little-endian in
 * dptrl, bits 39:32 in the single-byte dptrh field.
 */
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
	desc->dptrh = upper_32_bits(addr) & 0xff;
}
344
/* Reassemble the 40-bit DMA address stored by rswitch_desc_set_dptr(). */
static dma_addr_t rswitch_desc_get_dptr(const struct rswitch_desc *desc)
{
	return __le32_to_cpu(desc->dptrl) | (u64)(desc->dptrh) << 32;
}
349
/* Lay out @gq's (non-timestamp) descriptor ring for the hardware:
 * RX descriptors get their skb data buffers DMA-mapped and are marked
 * DT_FEMPTY; TX descriptors are simply marked DT_EEMPTY.  The extra entry
 * past the ring is a DT_LINKFIX descriptor pointing back to the ring start,
 * the linkfix table entry for this queue is set up, and the queue's GWDCC
 * register is programmed.  Returns 0 on success or -ENOMEM with all mappings
 * made so far undone.
 */
static int rswitch_gwca_queue_format(struct net_device *ndev,
				     struct rswitch_private *priv,
				     struct rswitch_gwca_queue *gq)
{
	/* NOTE: clears only the ring_size data entries; the trailing link
	 * descriptor is fully written below anyway.
	 */
	int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
	struct rswitch_ext_desc *desc;
	struct rswitch_desc *linkfix;
	dma_addr_t dma_addr;
	int i;

	memset(gq->tx_ring, 0, ring_size);
	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[i]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			desc->desc.die_dt = DT_FEMPTY | DIE;
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}
	/* desc now points at the extra (ring_size-th) entry: link back to
	 * the start of the ring.
	 */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;

err:
	if (!gq->dir_tx) {
		/* Unmap the i buffers mapped so far: i only counts down while
		 * desc walks forward from the start of the ring.
		 */
		for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}
399
400 static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
401                                        int start_index, int num)
402 {
403         struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
404         struct rswitch_ts_desc *desc;
405         int i, index;
406
407         for (i = 0; i < num; i++) {
408                 index = (i + start_index) % gq->ring_size;
409                 desc = &gq->ts_ring[index];
410                 desc->desc.die_dt = DT_FEMPTY_ND | DIE;
411         }
412 }
413
/* Prepare @num extended-timestamp descriptors starting at @start_index.
 * For RX queues: map each slot's skb data for DMA, fill the descriptor, and
 * hand it to the hardware (DT_FEMPTY); the dma_wmb() guarantees the address
 * and size are visible before die_dt flips ownership.  info1 records the
 * source port for timestamp matching.  For TX queues only die_dt is set.
 * Returns 0 on success or -ENOMEM with this call's mappings undone.
 */
static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
					  struct rswitch_gwca_queue *gq,
					  int start_index, int num)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_ext_ts_desc *desc;
	dma_addr_t dma_addr;
	int i, index;

	for (i = 0; i < num; i++) {
		index = (i + start_index) % gq->ring_size;
		desc = &gq->rx_ring[index];
		if (!gq->dir_tx) {
			dma_addr = dma_map_single(ndev->dev.parent,
						  gq->skbs[index]->data, PKT_BUF_SZ,
						  DMA_FROM_DEVICE);
			if (dma_mapping_error(ndev->dev.parent, dma_addr))
				goto err;

			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
			rswitch_desc_set_dptr(&desc->desc, dma_addr);
			/* Publish the buffer before transferring ownership. */
			dma_wmb();
			desc->desc.die_dt = DT_FEMPTY | DIE;
			desc->info1 = cpu_to_le64(INFO1_SPN(rdev->etha->index));
		} else {
			desc->desc.die_dt = DT_EEMPTY | DIE;
		}
	}

	return 0;

err:
	/* Unmap only the descriptors this call already mapped. */
	if (!gq->dir_tx) {
		for (i--; i >= 0; i--) {
			index = (i + start_index) % gq->ring_size;
			desc = &gq->rx_ring[index];
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		}
	}

	return -ENOMEM;
}
458
/* Lay out @gq's extended-timestamp descriptor ring: zero the data entries,
 * fill/map them via rswitch_gwca_queue_ext_ts_fill(), terminate the ring
 * with a DT_LINKFIX entry pointing back to the start, hook the queue into
 * the linkfix table, and program GWDCC (with GWDCC_ETS for timestamping).
 * Returns 0 on success or a negative error from the fill step.
 */
static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
					    struct rswitch_private *priv,
					    struct rswitch_gwca_queue *gq)
{
	int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
	struct rswitch_ext_ts_desc *desc;
	struct rswitch_desc *linkfix;
	int err;

	memset(gq->rx_ring, 0, ring_size);
	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
	if (err < 0)
		return err;

	desc = &gq->rx_ring[gq->ring_size];	/* Last */
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	desc->desc.die_dt = DT_LINKFIX;

	linkfix = &priv->gwca.linkfix_table[gq->index];
	linkfix->die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(linkfix, gq->ring_dma);

	iowrite32(GWDCC_BALR | (gq->dir_tx ? GWDCC_DCP(GWCA_IPV_NUM) | GWDCC_DQT : 0) |
		  GWDCC_ETS | GWDCC_EDE,
		  priv->addr + GWDCC_OFFS(gq->index));

	return 0;
}
487
/* Allocate the DMA-coherent linkfix table (one descriptor per GWCA queue)
 * and mark every entry as end-of-set (DT_EOS) until a queue claims it.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
	int i, num_queues = priv->gwca.num_queues;
	struct rswitch_gwca *gwca = &priv->gwca;
	struct device *dev = &priv->pdev->dev;

	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
						 &gwca->linkfix_table_dma, GFP_KERNEL);
	if (!gwca->linkfix_table)
		return -ENOMEM;
	for (i = 0; i < num_queues; i++)
		gwca->linkfix_table[i].die_dt = DT_EOS;

	return 0;
}
504
/* Free the linkfix table if it was allocated; NULLing the pointer makes a
 * second call a no-op.
 */
static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
	struct rswitch_gwca *gwca = &priv->gwca;

	if (gwca->linkfix_table)
		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
				  gwca->linkfix_table, gwca->linkfix_table_dma);
	gwca->linkfix_table = NULL;
}
514
/* Allocate and initialize the (single) timestamp descriptor ring:
 * TS_RING_SIZE data entries plus one trailing DT_LINKFIX entry that points
 * back to the ring start.  Also initializes the list used to match
 * timestamps to transmitted skbs.  Returns 0 or -ENOMEM.
 */
static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
	struct rswitch_ts_desc *desc;

	gq->ring_size = TS_RING_SIZE;
	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
					 sizeof(struct rswitch_ts_desc) *
					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);

	if (!gq->ts_ring)
		return -ENOMEM;

	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
	desc = &gq->ts_ring[gq->ring_size];
	desc->desc.die_dt = DT_LINKFIX;
	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
	INIT_LIST_HEAD(&priv->gwca.ts_info_list);

	return 0;
}
536
/* Claim a free GWCA queue from the bitmap.
 * The queue structure is zeroed so later allocation error paths can rely on
 * NULL ring pointers, and only its index is preserved.  Returns the queue or
 * NULL when all queues are in use.
 */
static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
{
	struct rswitch_gwca_queue *gq;
	int index;

	index = find_first_zero_bit(priv->gwca.used, priv->gwca.num_queues);
	if (index >= priv->gwca.num_queues)
		return NULL;
	set_bit(index, priv->gwca.used);
	gq = &priv->gwca.queues[index];
	memset(gq, 0, sizeof(*gq));
	gq->index = index;

	return gq;
}
552
/* Return @gq to the pool by clearing its bit in the used-queue bitmap. */
static void rswitch_gwca_put(struct rswitch_private *priv,
			     struct rswitch_gwca_queue *gq)
{
	clear_bit(gq->index, priv->gwca.used);
}
558
/* Reserve a GWCA queue for @ndev's TX path and allocate its ring.
 * Returns 0 on success, -EBUSY when no queue is free, or the queue
 * allocation error (the reserved queue is released on failure).
 */
static int rswitch_txdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->tx_queue = rswitch_gwca_get(priv);
	if (!rdev->tx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->tx_queue);
		return err;
	}

	return 0;
}
577
/* Undo rswitch_txdmac_alloc(): free the TX ring and release the queue. */
static void rswitch_txdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->tx_queue);
	rswitch_gwca_put(rdev->priv, rdev->tx_queue);
}
585
/* Format port @index's TX queue for the hardware (descriptor layout,
 * linkfix entry, GWDCC) — see rswitch_gwca_queue_format().
 */
static int rswitch_txdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];

	return rswitch_gwca_queue_format(rdev->ndev, priv, rdev->tx_queue);
}
592
/* Reserve a GWCA queue for @ndev's RX path and allocate its ring and skbs.
 * Returns 0 on success, -EBUSY when no queue is free, or the queue
 * allocation error (the reserved queue is released on failure).
 */
static int rswitch_rxdmac_alloc(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_private *priv = rdev->priv;
	int err;

	rdev->rx_queue = rswitch_gwca_get(priv);
	if (!rdev->rx_queue)
		return -EBUSY;

	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
	if (err < 0) {
		rswitch_gwca_put(priv, rdev->rx_queue);
		return err;
	}

	return 0;
}
611
/* Undo rswitch_rxdmac_alloc(): free the RX ring/skbs and release the queue. */
static void rswitch_rxdmac_free(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	rswitch_gwca_queue_free(ndev, rdev->rx_queue);
	rswitch_gwca_put(rdev->priv, rdev->rx_queue);
}
619
/* Format port @index's RX queue (extended-timestamp descriptors) for the
 * hardware — see rswitch_gwca_queue_ext_ts_format().
 */
static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
{
	struct rswitch_device *rdev = priv->rdev[index];
	struct net_device *ndev = rdev->ndev;

	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}
627
/* Bring the GWCA hardware up.  The sequence is fixed:
 * DISABLE -> CONFIG, reset the multicast table and AXI RAM, program the
 * global registers (VLAN mode, linkfix table base, timestamp ring base and
 * IRQ, priority), format every port's RX and TX queue, then
 * DISABLE -> OPERATION.  Any failing step aborts with its error code.
 */
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
{
	int i, err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_CONFIG);
	if (err < 0)
		return err;

	err = rswitch_gwca_mcast_table_reset(priv);
	if (err < 0)
		return err;
	err = rswitch_gwca_axi_ram_reset(priv);
	if (err < 0)
		return err;

	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
	iowrite32(0, priv->addr + GWTTFC);
	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);

	iowrite32(GWTPC_PPPL(GWCA_IPV_NUM), priv->addr + GWTPC0);

	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
		err = rswitch_rxdmac_init(priv, i);
		if (err < 0)
			return err;
		err = rswitch_txdmac_init(priv, i);
		if (err < 0)
			return err;
	}

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	return rswitch_gwca_change_mode(priv, GWMC_OPC_OPERATION);
}
670
/* Shut the GWCA down: DISABLE -> RESET -> DISABLE, aborting on the first
 * mode change that fails.
 */
static int rswitch_gwca_hw_deinit(struct rswitch_private *priv)
{
	int err;

	err = rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
	if (err < 0)
		return err;
	err = rswitch_gwca_change_mode(priv, GWMC_OPC_RESET);
	if (err < 0)
		return err;

	return rswitch_gwca_change_mode(priv, GWMC_OPC_DISABLE);
}
684
/* Emergency stop after a fatal error (e.g. an allocation failure in the RX
 * refill path): flag the halt so the NAPI poller stops, shut the hardware
 * down, and log the outcome.
 */
static int rswitch_gwca_halt(struct rswitch_private *priv)
{
	int err;

	priv->gwca_halt = true;
	err = rswitch_gwca_hw_deinit(priv);
	dev_err(&priv->pdev->dev, "halted (%d)\n", err);

	return err;
}
695
/* Receive up to *quota packets from @ndev's RX ring.
 * Consumes filled descriptors (die_dt != DT_FEMPTY) starting at gq->cur,
 * attaching a hardware RX timestamp when enabled, then refills the consumed
 * range with fresh skbs and re-armed descriptors and decrements *quota by
 * the number of packets handled.  Returns true when the budget was exhausted
 * (more work may remain), false otherwise.  On refill failure the whole
 * switch is halted and 0 (false) is returned.
 */
static bool rswitch_rx(struct net_device *ndev, int *quota)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->rx_queue;
	struct rswitch_ext_ts_desc *desc;
	int limit, boguscnt, num, ret;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 get_ts;

	if (*quota <= 0)
		return true;

	/* Never process more than one full ring per call. */
	boguscnt = min_t(int, gq->ring_size, *quota);
	limit = boguscnt;

	desc = &gq->rx_ring[gq->cur];
	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
		/* Read the descriptor payload only after seeing die_dt. */
		dma_rmb();
		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
		skb = gq->skbs[gq->cur];
		gq->skbs[gq->cur] = NULL;
		dma_addr = rswitch_desc_get_dptr(&desc->desc);
		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
		if (get_ts) {
			struct skb_shared_hwtstamps *shhwtstamps;
			struct timespec64 ts;

			shhwtstamps = skb_hwtstamps(skb);
			memset(shhwtstamps, 0, sizeof(*shhwtstamps));
			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
			/* Nanoseconds occupy only the low 30 bits. */
			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
		}
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&rdev->napi, skb);
		rdev->ndev->stats.rx_packets++;
		rdev->ndev->stats.rx_bytes += pkt_len;

		gq->cur = rswitch_next_queue_index(gq, true, 1);
		desc = &gq->rx_ring[gq->cur];

		if (--boguscnt <= 0)
			break;
	}

	/* Refill everything between dirty and cur with new skbs/descriptors. */
	num = rswitch_get_num_cur_queues(gq);
	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
	if (ret < 0)
		goto err;
	gq->dirty = rswitch_next_queue_index(gq, false, num);

	*quota -= limit - boguscnt;

	return boguscnt <= 0;

err:
	rswitch_gwca_halt(rdev->priv);

	return 0;
}
763
/* Reclaim completed TX descriptors between dirty and cur.
 * With @free_txed_only set, stop at the first descriptor the hardware has
 * not finished with (die_dt != DT_FEMPTY); otherwise reclaim everything.
 * Unmaps and frees the skb of each reclaimed slot, resets the descriptor to
 * DT_EEMPTY, and updates the TX statistics.  Returns the number of skbs
 * freed.
 */
static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
{
	struct rswitch_device *rdev = netdev_priv(ndev);
	struct rswitch_gwca_queue *gq = rdev->tx_queue;
	struct rswitch_ext_desc *desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int free_num = 0;
	int size;

	for (; rswitch_get_num_cur_queues(gq) > 0;
	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
		desc = &gq->tx_ring[gq->dirty];
		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
			break;

		/* Read the descriptor contents only after the die_dt check. */
		dma_rmb();
		size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
		skb = gq->skbs[gq->dirty];
		if (skb) {
			dma_addr = rswitch_desc_get_dptr(&desc->desc);
			dma_unmap_single(ndev->dev.parent, dma_addr,
					 size, DMA_TO_DEVICE);
			dev_kfree_skb_any(gq->skbs[gq->dirty]);
			gq->skbs[gq->dirty] = NULL;
			free_num++;
		}
		desc->desc.die_dt = DT_EEMPTY;
		rdev->ndev->stats.tx_packets++;
		rdev->ndev->stats.tx_bytes += size;
	}

	return free_num;
}
798
/* NAPI poll handler: reclaim TX completions, receive up to @budget packets,
 * and loop as long as new packets arrived meanwhile.  When the budget is not
 * exhausted, complete NAPI and re-enable this device's TX/RX data interrupts
 * under priv->lock.  If the switch was halted, complete immediately and
 * report no work.  Returns the number of packets processed.
 */
static int rswitch_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct rswitch_private *priv;
	struct rswitch_device *rdev;
	unsigned long flags;
	int quota = budget;

	rdev = netdev_priv(ndev);
	priv = rdev->priv;

retry:
	rswitch_tx_free(ndev, true);

	if (rswitch_rx(ndev, &quota))
		goto out;		/* budget exhausted; stay scheduled */
	else if (rdev->priv->gwca_halt)
		goto err;
	else if (rswitch_is_queue_rxed(rdev->rx_queue))
		goto retry;		/* more packets arrived during the pass */

	netif_wake_subqueue(ndev, 0);

	if (napi_complete_done(napi, budget - quota)) {
		spin_lock_irqsave(&priv->lock, flags);
		rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
		rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

out:
	return budget - quota;

err:
	napi_complete(napi);

	return 0;
}
837
/* Hard-IRQ path for a queue interrupt: if NAPI is not already scheduled,
 * mask this device's TX/RX data interrupts (they are re-enabled when the
 * poller completes) and schedule the poller.
 */
static void rswitch_queue_interrupt(struct net_device *ndev)
{
	struct rswitch_device *rdev = netdev_priv(ndev);

	if (napi_schedule_prep(&rdev->napi)) {
		spin_lock(&rdev->priv->lock);
		rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
		rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
		spin_unlock(&rdev->priv->lock);
		__napi_schedule(&rdev->napi);
	}
}
850
/* Dispatch the pending data interrupts in @dis: for every queue whose bit
 * is set, acknowledge the interrupt and kick its device's NAPI.
 */
static irqreturn_t rswitch_data_irq(struct rswitch_private *priv, u32 *dis)
{
	struct rswitch_gwca_queue *gq;
	int i, index, bit;

	for (i = 0; i < priv->gwca.num_queues; i++) {
		gq = &priv->gwca.queues[i];
		index = gq->index / 32;
		bit = BIT(gq->index % 32);
		if (!(dis[index] & bit))
			continue;

		rswitch_ack_data_irq(priv, gq->index);
		rswitch_queue_interrupt(gq->ndev);
	}

	return IRQ_HANDLED;
}
869
/* Top-level GWCA interrupt handler: read the enabled-and-pending status and,
 * if any TX or RX data interrupt is among it, dispatch; otherwise report
 * IRQ_NONE so the core can detect spurious interrupts.
 */
static irqreturn_t rswitch_gwca_irq(int irq, void *dev_id)
{
	struct rswitch_private *priv = dev_id;
	u32 dis[RSWITCH_NUM_IRQ_REGS];
	irqreturn_t ret = IRQ_NONE;

	rswitch_get_data_irq_status(priv, dis);

	if (rswitch_is_any_data_irq(priv, dis, true) ||
	    rswitch_is_any_data_irq(priv, dis, false))
		ret = rswitch_data_irq(priv, dis);

	return ret;
}
884
/* Request all GWCA_NUM_IRQS interrupt lines by their platform resource
 * names.  The temporary resource-name string is freed immediately; the IRQ
 * name and the IRQ itself are devm-managed, so no explicit unwinding is
 * needed on failure.  Returns 0 on success or the first error encountered.
 */
static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
{
	char *resource_name, *irq_name;
	int i, ret, irq;

	for (i = 0; i < GWCA_NUM_IRQS; i++) {
		resource_name = kasprintf(GFP_KERNEL, GWCA_IRQ_RESOURCE_NAME, i);
		if (!resource_name)
			return -ENOMEM;

		irq = platform_get_irq_byname(priv->pdev, resource_name);
		kfree(resource_name);
		if (irq < 0)
			return irq;

		irq_name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL,
					  GWCA_IRQ_NAME, i);
		if (!irq_name)
			return -ENOMEM;

		ret = devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_irq,
				       0, irq_name, priv);
		if (ret < 0)
			return ret;
	}

	return 0;
}
913
/* Drain the GWCA timestamp descriptor queue: for each completed TX
 * timestamp, locate the pending entry matching (port, tag), deliver the
 * hardware timestamp via skb_tstamp_tx(), release the skb reference and
 * free the bookkeeping entry.  Finally refill the consumed descriptors.
 */
static void rswitch_ts(struct rswitch_private *priv)
{
        struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
        struct rswitch_gwca_ts_info *ts_info, *ts_info2;
        struct skb_shared_hwtstamps shhwtstamps;
        struct rswitch_ts_desc *desc;
        struct timespec64 ts;
        u32 tag, port;
        int num;

        desc = &gq->ts_ring[gq->cur];
        while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
                /* Read the payload only after the ownership check above. */
                dma_rmb();

                port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
                tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));

                list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
                        if (!(ts_info->port == port && ts_info->tag == tag))
                                continue;

                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                        /* Only the low 30 bits of ts_nsec carry nanoseconds. */
                        ts.tv_sec = __le32_to_cpu(desc->ts_sec);
                        ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
                        shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
                        skb_tstamp_tx(ts_info->skb, &shhwtstamps);
                        /* Drop the reference taken at xmit time. */
                        dev_consume_skb_irq(ts_info->skb);
                        list_del(&ts_info->list);
                        kfree(ts_info);
                        break;
                }

                gq->cur = rswitch_next_queue_index(gq, true, 1);
                desc = &gq->ts_ring[gq->cur];
        }

        num = rswitch_get_num_cur_queues(gq);
        rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
        gq->dirty = rswitch_next_queue_index(gq, false, num);
}
954
955 static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
956 {
957         struct rswitch_private *priv = dev_id;
958
959         if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
960                 iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
961                 rswitch_ts(priv);
962
963                 return IRQ_HANDLED;
964         }
965
966         return IRQ_NONE;
967 }
968
969 static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
970 {
971         int irq;
972
973         irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
974         if (irq < 0)
975                 return irq;
976
977         return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
978                                 0, GWCA_TS_IRQ_NAME, priv);
979 }
980
/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */

/* Move the ETHA agent into @mode and poll EAMS until the hardware
 * confirms the transition.  The agent clock must run for the register
 * writes, so it is ungated on entry if needed; when the target mode is
 * DISABLE the clock is gated again afterwards.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
                                    enum rswitch_etha_mode mode)
{
        int ret;

        if (!rswitch_agent_clock_is_enabled(etha->coma_addr, etha->index))
                rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 1);

        iowrite32(mode, etha->addr + EAMC);

        ret = rswitch_reg_wait(etha->addr, EAMS, EAMS_OPS_MASK, mode);

        if (mode == EAMC_OPC_DISABLE)
                rswitch_agent_clock_ctrl(etha->coma_addr, etha->index, 0);

        return ret;
}
999
1000 static void rswitch_etha_read_mac_address(struct rswitch_etha *etha)
1001 {
1002         u32 mrmac0 = ioread32(etha->addr + MRMAC0);
1003         u32 mrmac1 = ioread32(etha->addr + MRMAC1);
1004         u8 *mac = &etha->mac_addr[0];
1005
1006         mac[0] = (mrmac0 >>  8) & 0xFF;
1007         mac[1] = (mrmac0 >>  0) & 0xFF;
1008         mac[2] = (mrmac1 >> 24) & 0xFF;
1009         mac[3] = (mrmac1 >> 16) & 0xFF;
1010         mac[4] = (mrmac1 >>  8) & 0xFF;
1011         mac[5] = (mrmac1 >>  0) & 0xFF;
1012 }
1013
1014 static void rswitch_etha_write_mac_address(struct rswitch_etha *etha, const u8 *mac)
1015 {
1016         iowrite32((mac[0] << 8) | mac[1], etha->addr + MRMAC0);
1017         iowrite32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
1018                   etha->addr + MRMAC1);
1019 }
1020
/* Trigger link verification (MLVC.PLV) and poll until the hardware
 * clears the bit.  Returns 0 on success or -ETIMEDOUT.
 */
static int rswitch_etha_wait_link_verification(struct rswitch_etha *etha)
{
        iowrite32(MLVC_PLV, etha->addr + MLVC);

        return rswitch_reg_wait(etha->addr, MLVC, MLVC_PLV, 0);
}
1027
1028 static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
1029 {
1030         u32 val;
1031
1032         rswitch_etha_write_mac_address(etha, mac);
1033
1034         switch (etha->speed) {
1035         case 100:
1036                 val = MPIC_LSC_100M;
1037                 break;
1038         case 1000:
1039                 val = MPIC_LSC_1G;
1040                 break;
1041         case 2500:
1042                 val = MPIC_LSC_2_5G;
1043                 break;
1044         default:
1045                 return;
1046         }
1047
1048         iowrite32(MPIC_PIS_GMII | val, etha->addr + MPIC);
1049 }
1050
/* Set up the MDIO interface: program the MDC clock divider (PSMCS) and
 * hold time (PSMHT), then select Clause 45 framing.
 */
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
        rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
                       MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
        rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}
1057
/* Full ETHA/RMAC bring-up: go DISABLE -> CONFIG, program the MAC
 * address, link speed and MDIO parameters, run link verification, then
 * cycle through DISABLE into OPERATION mode.
 * Returns 0 on success or a negative error from a mode transition.
 */
static int rswitch_etha_hw_init(struct rswitch_etha *etha, const u8 *mac)
{
        int err;

        err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
        if (err < 0)
                return err;
        err = rswitch_etha_change_mode(etha, EAMC_OPC_CONFIG);
        if (err < 0)
                return err;

        /* Configuration is performed while the agent is in CONFIG mode. */
        iowrite32(EAVCC_VEM_SC_TAG, etha->addr + EAVCC);
        rswitch_rmac_setting(etha, mac);
        rswitch_etha_enable_mii(etha);

        err = rswitch_etha_wait_link_verification(etha);
        if (err < 0)
                return err;

        err = rswitch_etha_change_mode(etha, EAMC_OPC_DISABLE);
        if (err < 0)
                return err;

        return rswitch_etha_change_mode(etha, EAMC_OPC_OPERATION);
}
1083
/* Perform one Clause 45 MDIO transaction through the MPSM register:
 * first an address frame latching @regad, then a read or write data
 * frame.  Returns the 16-bit read value for reads, 0 for a successful
 * write, or a negative error on timeout / invalid @devad.
 */
static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
                                   int phyad, int devad, int regad, int data)
{
        int pop = read ? MDIO_READ_C45 : MDIO_WRITE_C45;
        u32 val;
        int ret;

        /* devad == -1 (presumably MDIO_DEVAD_NONE, i.e. a Clause 22
         * request) is not supported by this helper.
         */
        if (devad == 0xffffffff)
                return -ENODEV;

        writel(MMIS1_CLEAR_FLAGS, etha->addr + MMIS1);

        /* Phase 1: address frame latching the register address. */
        val = MPSM_PSME | MPSM_MFF_C45;
        iowrite32((regad << 16) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

        ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);
        if (ret)
                return ret;

        /* Write 1 to clear the address-phase completion flag. */
        rswitch_modify(etha->addr, MMIS1, MMIS1_PAACS, MMIS1_PAACS);

        if (read) {
                /* Phase 2 (read): issue the read frame, then fetch the data
                 * from the upper half of MPSM.
                 */
                writel((pop << 13) | (devad << 8) | (phyad << 3) | val, etha->addr + MPSM);

                ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
                if (ret)
                        return ret;

                ret = (ioread32(etha->addr + MPSM) & MPSM_PRD_MASK) >> 16;

                rswitch_modify(etha->addr, MMIS1, MMIS1_PRACS, MMIS1_PRACS);
        } else {
                /* Phase 2 (write): issue the write frame carrying @data. */
                iowrite32((data << 16) | (pop << 13) | (devad << 8) | (phyad << 3) | val,
                          etha->addr + MPSM);

                ret = rswitch_reg_wait(etha->addr, MMIS1, MMIS1_PWACS, MMIS1_PWACS);
        }

        return ret;
}
1124
1125 static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
1126                                      int regad)
1127 {
1128         struct rswitch_etha *etha = bus->priv;
1129
1130         return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
1131 }
1132
1133 static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
1134                                       int regad, u16 val)
1135 {
1136         struct rswitch_etha *etha = bus->priv;
1137
1138         return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
1139 }
1140
/* Call of_node_put(port) after done */
/* Find the "ethernet-ports" child whose "reg" property matches this
 * port's ETHA index.  Returns NULL when the parent node is missing, a
 * "reg" property is unreadable, or the matching node is disabled.
 * for_each_child_of_node() leaves a reference on the node we break out
 * with, which is handed to the caller.
 */
static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
{
        struct device_node *ports, *port;
        int err = 0;
        u32 index;

        ports = of_get_child_by_name(rdev->ndev->dev.parent->of_node,
                                     "ethernet-ports");
        if (!ports)
                return NULL;

        for_each_child_of_node(ports, port) {
                err = of_property_read_u32(port, "reg", &index);
                if (err < 0) {
                        /* NOTE(review): overwriting 'port' here discards the
                         * iteration's reference without of_node_put() — a
                         * possible refcount leak on a malformed DT; confirm.
                         */
                        port = NULL;
                        goto out;
                }
                if (index == rdev->etha->index) {
                        /* Disabled node: report "no port" but stop looking.
                         * NOTE(review): this also drops the node reference
                         * without of_node_put() — confirm intent.
                         */
                        if (!of_device_is_available(port))
                                port = NULL;
                        break;
                }
        }

out:
        of_node_put(ports);

        return port;
}
1171
1172 static int rswitch_etha_get_params(struct rswitch_device *rdev)
1173 {
1174         u32 max_speed;
1175         int err;
1176
1177         if (!rdev->np_port)
1178                 return 0;       /* ignored */
1179
1180         err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
1181         if (err)
1182                 return err;
1183
1184         err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
1185         if (!err) {
1186                 rdev->etha->speed = max_speed;
1187                 return 0;
1188         }
1189
1190         /* if no "max-speed" property, let's use default speed */
1191         switch (rdev->etha->phy_interface) {
1192         case PHY_INTERFACE_MODE_MII:
1193                 rdev->etha->speed = SPEED_100;
1194                 break;
1195         case PHY_INTERFACE_MODE_SGMII:
1196                 rdev->etha->speed = SPEED_1000;
1197                 break;
1198         case PHY_INTERFACE_MODE_USXGMII:
1199                 rdev->etha->speed = SPEED_2500;
1200                 break;
1201         default:
1202                 return -EINVAL;
1203         }
1204
1205         return 0;
1206 }
1207
/* Allocate and register an MDIO bus for this port's ETHA, rooted at the
 * optional "mdio" subnode of the port's DT node.  On success the bus is
 * stored in etha->mii; on failure the bus is freed and the error code
 * from of_mdiobus_register() is returned.
 */
static int rswitch_mii_register(struct rswitch_device *rdev)
{
        struct device_node *mdio_np;
        struct mii_bus *mii_bus;
        int err;

        mii_bus = mdiobus_alloc();
        if (!mii_bus)
                return -ENOMEM;

        mii_bus->name = "rswitch_mii";
        sprintf(mii_bus->id, "etha%d", rdev->etha->index);
        mii_bus->priv = rdev->etha;
        mii_bus->read_c45 = rswitch_etha_mii_read_c45;
        mii_bus->write_c45 = rswitch_etha_mii_write_c45;
        mii_bus->parent = &rdev->priv->pdev->dev;

        /* A missing "mdio" node is fine: of_mdiobus_register() accepts NULL. */
        mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
        err = of_mdiobus_register(mii_bus, mdio_np);
        if (err < 0) {
                mdiobus_free(mii_bus);
                goto out;
        }

        rdev->etha->mii = mii_bus;

out:
        of_node_put(mdio_np);

        return err;
}
1239
1240 static void rswitch_mii_unregister(struct rswitch_device *rdev)
1241 {
1242         if (rdev->etha->mii) {
1243                 mdiobus_unregister(rdev->etha->mii);
1244                 mdiobus_free(rdev->etha->mii);
1245                 rdev->etha->mii = NULL;
1246         }
1247 }
1248
/* phylib adjust_link callback: mirror the PHY link state into the
 * SerDes power state, and — unless runtime speed changes are disabled —
 * reprogram the ETHA and SerDes when the negotiated speed changed.
 */
static void rswitch_adjust_link(struct net_device *ndev)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;

        if (phydev->link != rdev->etha->link) {
                phy_print_status(phydev);
                if (phydev->link)
                        phy_power_on(rdev->serdes);
                else if (rdev->serdes->power_count)
                        /* Only power off a SerDes that is actually powered. */
                        phy_power_off(rdev->serdes);

                rdev->etha->link = phydev->link;

                if (!rdev->priv->etha_no_runtime_change &&
                    phydev->speed != rdev->etha->speed) {
                        rdev->etha->speed = phydev->speed;

                        /* Re-run the MAC bring-up for the new speed. */
                        rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
                        phy_set_speed(rdev->serdes, rdev->etha->speed);
                }
        }
}
1272
/* When runtime speed changes are not supported, restrict the PHY's
 * advertised link modes to the single speed this ETHA is configured
 * for, and cap the PHY's maximum speed accordingly.
 */
static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
                                         struct phy_device *phydev)
{
        if (!rdev->priv->etha_no_runtime_change)
                return;

        /* Remove the two full-duplex modes other than the fixed speed. */
        switch (rdev->etha->speed) {
        case SPEED_2500:
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
                break;
        case SPEED_1000:
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
                break;
        case SPEED_100:
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
                phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
                break;
        default:
                break;
        }

        phy_set_max_speed(phydev, rdev->etha->speed);
}
1298
/* Resolve the port's "phy-handle", connect the PHY with
 * rswitch_adjust_link() as the link callback, and trim the advertised
 * link modes to what this hardware supports.
 * Returns 0 on success, -ENODEV when no PHY is described, or -ENOENT
 * when the PHY cannot be found/connected.
 */
static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
        struct phy_device *phydev;
        struct device_node *phy;
        int err = -ENOENT;

        if (!rdev->np_port)
                return -ENODEV;

        phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
        if (!phy)
                return -ENODEV;

        /* Set phydev->host_interfaces before calling of_phy_connect() to
         * configure the PHY with the information of host_interfaces.
         */
        phydev = of_phy_find_device(phy);
        if (!phydev)
                goto out;
        /* NOTE(review): of_phy_find_device() takes a device reference that
         * is not dropped here — confirm whether a put_device() is needed.
         */
        __set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
        phydev->mac_managed_pm = true;

        phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
                                rdev->etha->phy_interface);
        if (!phydev)
                goto out;

        /* Drop half-duplex modes; the trimming below fixes the speed when
         * runtime speed changes are disabled.
         */
        phy_set_max_speed(phydev, SPEED_2500);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
        rswitch_phy_remove_link_mode(rdev, phydev);

        phy_attached_info(phydev);

        err = 0;
out:
        of_node_put(phy);

        return err;
}
1341
1342 static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
1343 {
1344         if (rdev->ndev->phydev)
1345                 phy_disconnect(rdev->ndev->phydev);
1346 }
1347
1348 static int rswitch_serdes_set_params(struct rswitch_device *rdev)
1349 {
1350         int err;
1351
1352         err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
1353                                rdev->etha->phy_interface);
1354         if (err < 0)
1355                 return err;
1356
1357         return phy_set_speed(rdev->serdes, rdev->etha->speed);
1358 }
1359
/* Bring up one Ethernet port: initialize the ETHA hardware (once, when
 * runtime changes are disabled), register the MDIO bus, connect the PHY
 * and configure the SerDes.  Unwinds in reverse order on failure.
 */
static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
{
        int err;

        if (!rdev->etha->operated) {
                err = rswitch_etha_hw_init(rdev->etha, rdev->ndev->dev_addr);
                if (err < 0)
                        return err;
                /* Mark "already operated" only when re-init at runtime is
                 * not possible, so later speed changes skip hw_init.
                 */
                if (rdev->priv->etha_no_runtime_change)
                        rdev->etha->operated = true;
        }

        err = rswitch_mii_register(rdev);
        if (err < 0)
                return err;

        err = rswitch_phy_device_init(rdev);
        if (err < 0)
                goto err_phy_device_init;

        rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
        if (IS_ERR(rdev->serdes)) {
                err = PTR_ERR(rdev->serdes);
                goto err_serdes_phy_get;
        }

        err = rswitch_serdes_set_params(rdev);
        if (err < 0)
                goto err_serdes_set_params;

        return 0;

err_serdes_set_params:
err_serdes_phy_get:
        rswitch_phy_device_deinit(rdev);

err_phy_device_init:
        rswitch_mii_unregister(rdev);

        return err;
}
1401
/* Tear down one port in reverse order of rswitch_ether_port_init_one(). */
static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
        rswitch_phy_device_deinit(rdev);
        rswitch_mii_unregister(rdev);
}
1407
/* Initialize every enabled port, then init each port's SerDes.  On
 * failure, already-initialized ports are unwound in reverse.
 */
static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
        unsigned int i;
        int err;

        rswitch_for_each_enabled_port(priv, i) {
                err = rswitch_ether_port_init_one(priv->rdev[i]);
                if (err)
                        goto err_init_one;
        }

        rswitch_for_each_enabled_port(priv, i) {
                err = phy_init(priv->rdev[i]->serdes);
                if (err)
                        goto err_serdes;
        }

        return 0;

err_serdes:
        rswitch_for_each_enabled_port_continue_reverse(priv, i)
                phy_exit(priv->rdev[i]->serdes);
        /* All ports passed init_one; restart the reverse walk from the
         * end so every one of them is deinitialized below.
         */
        i = RSWITCH_NUM_PORTS;

err_init_one:
        rswitch_for_each_enabled_port_continue_reverse(priv, i)
                rswitch_ether_port_deinit_one(priv->rdev[i]);

        return err;
}
1438
1439 static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
1440 {
1441         int i;
1442
1443         for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
1444                 phy_exit(priv->rdev[i]->serdes);
1445                 rswitch_ether_port_deinit_one(priv->rdev[i]);
1446         }
1447 }
1448
1449 static int rswitch_open(struct net_device *ndev)
1450 {
1451         struct rswitch_device *rdev = netdev_priv(ndev);
1452         unsigned long flags;
1453
1454         phy_start(ndev->phydev);
1455
1456         napi_enable(&rdev->napi);
1457         netif_start_queue(ndev);
1458
1459         spin_lock_irqsave(&rdev->priv->lock, flags);
1460         rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
1461         rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
1462         spin_unlock_irqrestore(&rdev->priv->lock, flags);
1463
1464         if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
1465                 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
1466
1467         bitmap_set(rdev->priv->opened_ports, rdev->port, 1);
1468
1469         return 0;
1470 };
1471
1472 static int rswitch_stop(struct net_device *ndev)
1473 {
1474         struct rswitch_device *rdev = netdev_priv(ndev);
1475         struct rswitch_gwca_ts_info *ts_info, *ts_info2;
1476         unsigned long flags;
1477
1478         netif_tx_stop_all_queues(ndev);
1479         bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
1480
1481         if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
1482                 iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
1483
1484         list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
1485                 if (ts_info->port != rdev->port)
1486                         continue;
1487                 dev_kfree_skb_irq(ts_info->skb);
1488                 list_del(&ts_info->list);
1489                 kfree(ts_info);
1490         }
1491
1492         spin_lock_irqsave(&rdev->priv->lock, flags);
1493         rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
1494         rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
1495         spin_unlock_irqrestore(&rdev->priv->lock, flags);
1496
1497         phy_stop(ndev->phydev);
1498         napi_disable(&rdev->napi);
1499
1500         return 0;
1501 };
1502
1503 static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1504 {
1505         struct rswitch_device *rdev = netdev_priv(ndev);
1506         struct rswitch_gwca_queue *gq = rdev->tx_queue;
1507         netdev_tx_t ret = NETDEV_TX_OK;
1508         struct rswitch_ext_desc *desc;
1509         dma_addr_t dma_addr;
1510
1511         if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
1512                 netif_stop_subqueue(ndev, 0);
1513                 return NETDEV_TX_BUSY;
1514         }
1515
1516         if (skb_put_padto(skb, ETH_ZLEN))
1517                 return ret;
1518
1519         dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
1520         if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
1521                 dev_kfree_skb_any(skb);
1522                 return ret;
1523         }
1524
1525         gq->skbs[gq->cur] = skb;
1526         desc = &gq->tx_ring[gq->cur];
1527         rswitch_desc_set_dptr(&desc->desc, dma_addr);
1528         desc->desc.info_ds = cpu_to_le16(skb->len);
1529
1530         desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
1531                                   INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
1532         if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1533                 struct rswitch_gwca_ts_info *ts_info;
1534
1535                 ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
1536                 if (!ts_info) {
1537                         dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
1538                         return ret;
1539                 }
1540
1541                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1542                 rdev->ts_tag++;
1543                 desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
1544
1545                 ts_info->skb = skb_get(skb);
1546                 ts_info->port = rdev->port;
1547                 ts_info->tag = rdev->ts_tag;
1548                 list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
1549
1550                 skb_tx_timestamp(skb);
1551         }
1552
1553         dma_wmb();
1554
1555         desc->desc.die_dt = DT_FSINGLE | DIE;
1556         wmb();  /* gq->cur must be incremented after die_dt was set */
1557
1558         gq->cur = rswitch_next_queue_index(gq, true, 1);
1559         rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
1560
1561         return ret;
1562 }
1563
/* ndo_get_stats: counters are kept directly in the generic netdev stats. */
static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
{
        return &ndev->stats;
}
1568
/* SIOCGHWTSTAMP: report the current hardware timestamping configuration
 * (shared gPTP state) back to user space.
 */
static int rswitch_hwstamp_get(struct net_device *ndev, struct ifreq *req)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct rcar_gen4_ptp_private *ptp_priv;
        struct hwtstamp_config config;

        ptp_priv = rdev->priv->ptp_priv;

        config.flags = 0;
        config.tx_type = ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
                                                    HWTSTAMP_TX_OFF;
        /* Map the driver's RX control bits back to the uapi filter values. */
        switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
        case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
                break;
        case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        default:
                config.rx_filter = HWTSTAMP_FILTER_NONE;
                break;
        }

        return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}
1594
/* SIOCSHWTSTAMP: validate and apply a hardware timestamping request,
 * then echo the (possibly coerced) config back to user space.
 */
static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
{
        struct rswitch_device *rdev = netdev_priv(ndev);
        u32 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED;
        struct hwtstamp_config config;
        u32 tstamp_tx_ctrl;

        if (copy_from_user(&config, req->ifr_data, sizeof(config)))
                return -EFAULT;

        if (config.flags)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                tstamp_tx_ctrl = 0;
                break;
        case HWTSTAMP_TX_ON:
                tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tstamp_rx_ctrl = 0;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
                break;
        default:
                /* Any other filter is upgraded to "timestamp everything",
                 * and the coerced value is reported back to the caller.
                 */
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                tstamp_rx_ctrl |= RCAR_GEN4_RXTSTAMP_TYPE_ALL;
                break;
        }

        rdev->priv->ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
        rdev->priv->ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

        return copy_to_user(req->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
}
1637
1638 static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1639 {
1640         if (!netif_running(ndev))
1641                 return -EINVAL;
1642
1643         switch (cmd) {
1644         case SIOCGHWTSTAMP:
1645                 return rswitch_hwstamp_get(ndev, req);
1646         case SIOCSHWTSTAMP:
1647                 return rswitch_hwstamp_set(ndev, req);
1648         default:
1649                 return phy_mii_ioctl(ndev->phydev, req, cmd);
1650         }
1651 }
1652
/* net_device callbacks for a single rswitch TSN port. */
static const struct net_device_ops rswitch_netdev_ops = {
        .ndo_open = rswitch_open,
        .ndo_stop = rswitch_stop,
        .ndo_start_xmit = rswitch_start_xmit,
        .ndo_get_stats = rswitch_get_stats,
        .ndo_eth_ioctl = rswitch_eth_ioctl,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_mac_address = eth_mac_addr,
};
1662
/* ethtool get_ts_info: advertise the shared gPTP clock and the
 * timestamping capabilities this driver supports.
 */
static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
        struct rswitch_device *rdev = netdev_priv(ndev);

        info->phc_index = ptp_clock_index(rdev->priv->ptp_priv->clock);
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
                                SOF_TIMESTAMPING_SOFTWARE |
                                SOF_TIMESTAMPING_TX_HARDWARE |
                                SOF_TIMESTAMPING_RX_HARDWARE |
                                SOF_TIMESTAMPING_RAW_HARDWARE;
        info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
        info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);

        return 0;
}
1679
/* ethtool callbacks: link settings are delegated to phylib. */
static const struct ethtool_ops rswitch_ethtool_ops = {
        .get_ts_info = rswitch_get_ts_info,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
1685
/* Device-tree compatibles handled by this driver. */
static const struct of_device_id renesas_eth_sw_of_table[] = {
        { .compatible = "renesas,r8a779f0-ether-switch", },
        { }
};
MODULE_DEVICE_TABLE(of, renesas_eth_sw_of_table);
1691
/* Initialize the software state of one ETHA agent: zero it, record its
 * index and register base, and precompute the MDC clock divider.
 */
static void rswitch_etha_init(struct rswitch_private *priv, int index)
{
        struct rswitch_etha *etha = &priv->etha[index];

        memset(etha, 0, sizeof(*etha));
        etha->index = index;
        etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
        etha->coma_addr = priv->addr;

        /* MPIC.PSMCS = (clk [MHz] / (MDC frequency [MHz] * 2) - 1.
         * Calculating PSMCS value as MDC frequency = 2.5MHz. So, multiply
         * both the numerator and the denominator by 10.
         */
        etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}
1707
/* Allocate and set up the net_device and software state for one port:
 * netdev + NAPI, MAC address (DT, then hardware, then random fallback),
 * ETHA parameters, and the RX/TX DMAC rings.
 * Returns 0 on success or a negative errno after unwinding.
 */
static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
        struct platform_device *pdev = priv->pdev;
        struct rswitch_device *rdev;
        struct net_device *ndev;
        int err;

        if (index >= RSWITCH_NUM_PORTS)
                return -EINVAL;

        ndev = alloc_etherdev_mqs(sizeof(struct rswitch_device), 1, 1);
        if (!ndev)
                return -ENOMEM;

        SET_NETDEV_DEV(ndev, &pdev->dev);
        ether_setup(ndev);

        rdev = netdev_priv(ndev);
        rdev->ndev = ndev;
        rdev->priv = priv;
        priv->rdev[index] = rdev;
        rdev->port = index;
        rdev->etha = &priv->etha[index];
        rdev->addr = priv->addr;

        ndev->base_addr = (unsigned long)rdev->addr;
        snprintf(ndev->name, IFNAMSIZ, "tsn%d", index);
        ndev->netdev_ops = &rswitch_netdev_ops;
        ndev->ethtool_ops = &rswitch_ethtool_ops;

        netif_napi_add(ndev, &rdev->napi, rswitch_poll);

        /* A port without a usable DT node stays disabled. */
        rdev->np_port = rswitch_get_port_node(rdev);
        rdev->disabled = !rdev->np_port;
        err = of_get_ethdev_address(rdev->np_port, ndev);
        /* NOTE(review): this drops the reference from
         * rswitch_get_port_node() while rdev->np_port is still
         * dereferenced below (rswitch_etha_get_params, and later
         * rswitch_ether_port_init_one).  Presumably safe because the OF
         * node stays alive for the device's lifetime — confirm.
         */
        of_node_put(rdev->np_port);
        if (err) {
                /* Fall back to the address latched from the registers,
                 * or a random one if that is invalid.
                 */
                if (is_valid_ether_addr(rdev->etha->mac_addr))
                        eth_hw_addr_set(ndev, rdev->etha->mac_addr);
                else
                        eth_hw_addr_random(ndev);
        }

        err = rswitch_etha_get_params(rdev);
        if (err < 0)
                goto out_get_params;

        /* The GWCA must run at least as fast as the fastest port. */
        if (rdev->priv->gwca.speed < rdev->etha->speed)
                rdev->priv->gwca.speed = rdev->etha->speed;

        err = rswitch_rxdmac_alloc(ndev);
        if (err < 0)
                goto out_rxdmac;

        err = rswitch_txdmac_alloc(ndev);
        if (err < 0)
                goto out_txdmac;

        return 0;

out_txdmac:
        rswitch_rxdmac_free(ndev);

out_rxdmac:
out_get_params:
        netif_napi_del(&rdev->napi);
        free_netdev(ndev);

        return err;
}
1778
1779 static void rswitch_device_free(struct rswitch_private *priv, int index)
1780 {
1781         struct rswitch_device *rdev = priv->rdev[index];
1782         struct net_device *ndev = rdev->ndev;
1783
1784         rswitch_txdmac_free(ndev);
1785         rswitch_rxdmac_free(ndev);
1786         netif_napi_del(&rdev->napi);
1787         free_netdev(ndev);
1788 }
1789
1790 static int rswitch_init(struct rswitch_private *priv)
1791 {
1792         unsigned int i;
1793         int err;
1794
1795         for (i = 0; i < RSWITCH_NUM_PORTS; i++)
1796                 rswitch_etha_init(priv, i);
1797
1798         rswitch_clock_enable(priv);
1799         for (i = 0; i < RSWITCH_NUM_PORTS; i++)
1800                 rswitch_etha_read_mac_address(&priv->etha[i]);
1801
1802         rswitch_reset(priv);
1803
1804         rswitch_clock_enable(priv);
1805         rswitch_top_init(priv);
1806         err = rswitch_bpool_config(priv);
1807         if (err < 0)
1808                 return err;
1809
1810         rswitch_coma_init(priv);
1811
1812         err = rswitch_gwca_linkfix_alloc(priv);
1813         if (err < 0)
1814                 return -ENOMEM;
1815
1816         err = rswitch_gwca_ts_queue_alloc(priv);
1817         if (err < 0)
1818                 goto err_ts_queue_alloc;
1819
1820         for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
1821                 err = rswitch_device_alloc(priv, i);
1822                 if (err < 0) {
1823                         for (; i-- > 0; )
1824                                 rswitch_device_free(priv, i);
1825                         goto err_device_alloc;
1826                 }
1827         }
1828
1829         rswitch_fwd_init(priv);
1830
1831         err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT_S4,
1832                                      RCAR_GEN4_PTP_CLOCK_S4);
1833         if (err < 0)
1834                 goto err_ptp_register;
1835
1836         err = rswitch_gwca_request_irqs(priv);
1837         if (err < 0)
1838                 goto err_gwca_request_irq;
1839
1840         err = rswitch_gwca_ts_request_irqs(priv);
1841         if (err < 0)
1842                 goto err_gwca_ts_request_irq;
1843
1844         err = rswitch_gwca_hw_init(priv);
1845         if (err < 0)
1846                 goto err_gwca_hw_init;
1847
1848         err = rswitch_ether_port_init_all(priv);
1849         if (err)
1850                 goto err_ether_port_init_all;
1851
1852         rswitch_for_each_enabled_port(priv, i) {
1853                 err = register_netdev(priv->rdev[i]->ndev);
1854                 if (err) {
1855                         rswitch_for_each_enabled_port_continue_reverse(priv, i)
1856                                 unregister_netdev(priv->rdev[i]->ndev);
1857                         goto err_register_netdev;
1858                 }
1859         }
1860
1861         rswitch_for_each_enabled_port(priv, i)
1862                 netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n",
1863                             priv->rdev[i]->ndev->dev_addr);
1864
1865         return 0;
1866
1867 err_register_netdev:
1868         rswitch_ether_port_deinit_all(priv);
1869
1870 err_ether_port_init_all:
1871         rswitch_gwca_hw_deinit(priv);
1872
1873 err_gwca_hw_init:
1874 err_gwca_ts_request_irq:
1875 err_gwca_request_irq:
1876         rcar_gen4_ptp_unregister(priv->ptp_priv);
1877
1878 err_ptp_register:
1879         for (i = 0; i < RSWITCH_NUM_PORTS; i++)
1880                 rswitch_device_free(priv, i);
1881
1882 err_device_alloc:
1883         rswitch_gwca_ts_queue_free(priv);
1884
1885 err_ts_queue_alloc:
1886         rswitch_gwca_linkfix_free(priv);
1887
1888         return err;
1889 }
1890
/* SoC revisions on which the ETHA link speed must not be changed at
 * runtime; matching sets priv->etha_no_runtime_change in probe.
 * Presumably an ES1.0 silicon limitation — confirm against the errata.
 */
static const struct soc_device_attribute rswitch_soc_no_speed_change[]  = {
        { .soc_id = "r8a779f0", .revision = "ES1.0" },
        { /* Sentinel */ }
};
1895
/* Platform probe: map the "secure_base" register window, set up clocks,
 * PTP, the DMA mask and the GWCA queue array, then hand off to
 * rswitch_init() with runtime PM held.
 *
 * Returns 0 on success or a negative errno.
 */
static int renesas_eth_sw_probe(struct platform_device *pdev)
{
        const struct soc_device_attribute *attr;
        struct rswitch_private *priv;
        struct resource *res;
        int ret;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "secure_base");
        if (!res) {
                dev_err(&pdev->dev, "invalid resource\n");
                return -EINVAL;
        }

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        spin_lock_init(&priv->lock);

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);

        /* Some SoC revisions cannot change the link speed at runtime. */
        attr = soc_device_match(rswitch_soc_no_speed_change);
        if (attr)
                priv->etha_no_runtime_change = true;

        priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
        if (!priv->ptp_priv)
                return -ENOMEM;

        platform_set_drvdata(pdev, priv);
        priv->pdev = pdev;
        priv->addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->addr))
                return PTR_ERR(priv->addr);

        /* The gPTP block lives at a fixed offset inside the same window. */
        priv->ptp_priv->addr = priv->addr + RCAR_GEN4_GPTP_OFFSET_S4;

        /* Prefer a 40-bit DMA mask, fall back to 32-bit. */
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
        if (ret < 0) {
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (ret < 0)
                        return ret;
        }

        priv->gwca.index = AGENT_INDEX_GWCA;
        priv->gwca.num_queues = min(RSWITCH_NUM_PORTS * NUM_QUEUES_PER_NDEV,
                                    RSWITCH_MAX_NUM_QUEUES);
        priv->gwca.queues = devm_kcalloc(&pdev->dev, priv->gwca.num_queues,
                                         sizeof(*priv->gwca.queues), GFP_KERNEL);
        if (!priv->gwca.queues)
                return -ENOMEM;

        /* The device must be powered for rswitch_init()'s register access. */
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);

        ret = rswitch_init(priv);
        if (ret < 0) {
                pm_runtime_put(&pdev->dev);
                pm_runtime_disable(&pdev->dev);
                return ret;
        }

        device_set_wakeup_capable(&pdev->dev, 1);

        return ret;
}
1963
1964 static void rswitch_deinit(struct rswitch_private *priv)
1965 {
1966         unsigned int i;
1967
1968         rswitch_gwca_hw_deinit(priv);
1969         rcar_gen4_ptp_unregister(priv->ptp_priv);
1970
1971         rswitch_for_each_enabled_port(priv, i) {
1972                 struct rswitch_device *rdev = priv->rdev[i];
1973
1974                 unregister_netdev(rdev->ndev);
1975                 rswitch_ether_port_deinit_one(rdev);
1976                 phy_exit(priv->rdev[i]->serdes);
1977         }
1978
1979         for (i = 0; i < RSWITCH_NUM_PORTS; i++)
1980                 rswitch_device_free(priv, i);
1981
1982         rswitch_gwca_ts_queue_free(priv);
1983         rswitch_gwca_linkfix_free(priv);
1984
1985         rswitch_clock_disable(priv);
1986 }
1987
1988 static void renesas_eth_sw_remove(struct platform_device *pdev)
1989 {
1990         struct rswitch_private *priv = platform_get_drvdata(pdev);
1991
1992         rswitch_deinit(priv);
1993
1994         pm_runtime_put(&pdev->dev);
1995         pm_runtime_disable(&pdev->dev);
1996
1997         platform_set_drvdata(pdev, NULL);
1998 }
1999
2000 static int renesas_eth_sw_suspend(struct device *dev)
2001 {
2002         struct rswitch_private *priv = dev_get_drvdata(dev);
2003         struct net_device *ndev;
2004         unsigned int i;
2005
2006         rswitch_for_each_enabled_port(priv, i) {
2007                 ndev = priv->rdev[i]->ndev;
2008                 if (netif_running(ndev)) {
2009                         netif_device_detach(ndev);
2010                         rswitch_stop(ndev);
2011                 }
2012                 if (priv->rdev[i]->serdes->init_count)
2013                         phy_exit(priv->rdev[i]->serdes);
2014         }
2015
2016         return 0;
2017 }
2018
2019 static int renesas_eth_sw_resume(struct device *dev)
2020 {
2021         struct rswitch_private *priv = dev_get_drvdata(dev);
2022         struct net_device *ndev;
2023         unsigned int i;
2024
2025         rswitch_for_each_enabled_port(priv, i) {
2026                 phy_init(priv->rdev[i]->serdes);
2027                 ndev = priv->rdev[i]->ndev;
2028                 if (netif_running(ndev)) {
2029                         rswitch_open(ndev);
2030                         netif_device_attach(ndev);
2031                 }
2032         }
2033
2034         return 0;
2035 }
2036
/* System sleep PM ops only; no runtime PM callbacks are provided. */
static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend,
                                renesas_eth_sw_resume);

/* Platform driver glue; matched via the DT compatible table. */
static struct platform_driver renesas_eth_sw_driver_platform = {
        .probe = renesas_eth_sw_probe,
        .remove_new = renesas_eth_sw_remove,
        .driver = {
                .name = "renesas_eth_sw",
                .pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops),
                .of_match_table = renesas_eth_sw_of_table,
        }
};
module_platform_driver(renesas_eth_sw_driver_platform);
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas Ethernet Switch device driver");
MODULE_LICENSE("GPL");