// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver ethtool intf
 *
 * Copyright (C) 2019 Texas Instruments
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "davinci_cpdma.h"

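/* Mirrors the layout of the CPSW hardware statistics (MIB) counters;
 * field offsets in this struct are used directly as register offsets
 * relative to cpsw->hw_stats (see cpsw_get_ethtool_stats()).
 */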
struct cpsw_hw_stats {
        u32     rxgoodframes;
        u32     rxbroadcastframes;
        u32     rxmulticastframes;
        u32     rxpauseframes;
        u32     rxcrcerrors;
        u32     rxaligncodeerrors;
        u32     rxoversizedframes;
        u32     rxjabberframes;
        u32     rxundersizedframes;
        u32     rxfragments;
        u32     __pad_0[2];
        u32     rxoctets;
        u32     txgoodframes;
        u32     txbroadcastframes;
        u32     txmulticastframes;
        u32     txpauseframes;
        u32     txdeferredframes;
        u32     txcollisionframes;
        u32     txsinglecollframes;
        u32     txmultcollframes;
        u32     txexcessivecollisions;
        u32     txlatecollisions;
        u32     txunderrun;
        u32     txcarriersenseerrors;
        u32     txoctets;
        u32     octetframes64;
        u32     octetframes65t127;
        u32     octetframes128t255;
        u32     octetframes256t511;
        u32     octetframes512t1023;
        u32     octetframes1024tup;
        u32     netoctets;
        u32     rxsofoverruns;
        u32     rxmofoverruns;
        u32     rxdmaoverruns;
};

struct cpsw_stats {
        char stat_string[ETH_GSTRING_LEN];
        int type;
        int sizeof_stat;
        int stat_offset;
};

enum {
        CPSW_STATS,
        CPDMA_RX_STATS,
        CPDMA_TX_STATS,
};

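/* Each macro expands to the (type, sizeof_stat, stat_offset) part of a
 * struct cpsw_stats initializer: the stat source, the field size and the
 * field offset within the corresponding stats structure.
 */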
#define CPSW_STAT(m)            CPSW_STATS,                             \
                                sizeof_field(struct cpsw_hw_stats, m), \
                                offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)        CPDMA_RX_STATS,                            \
                                sizeof_field(struct cpdma_chan_stats, m), \
                                offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)        CPDMA_TX_STATS,                            \
                                sizeof_field(struct cpdma_chan_stats, m), \
                                offsetof(struct cpdma_chan_stats, m)

static const struct cpsw_stats cpsw_gstrings_stats[] = {
        { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
        { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
        { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
        { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
        { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
        { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
        { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
        { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
        { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
        { "Rx Fragments", CPSW_STAT(rxfragments) },
        { "Rx Octets", CPSW_STAT(rxoctets) },
        { "Good Tx Frames", CPSW_STAT(txgoodframes) },
        { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
        { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
        { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
        { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
        { "Collisions", CPSW_STAT(txcollisionframes) },
        { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
        { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
        { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
        { "Late Collisions", CPSW_STAT(txlatecollisions) },
        { "Tx Underrun", CPSW_STAT(txunderrun) },
        { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
        { "Tx Octets", CPSW_STAT(txoctets) },
        { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
        { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
        { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
        { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
        { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
        { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
        { "Net Octets", CPSW_STAT(netoctets) },
        { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
        { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
        { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};

static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
        { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
        { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
        { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
        { "misqueued", CPDMA_RX_STAT(misqueued) },
        { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
        { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
        { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
        { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
        { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
        { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
        { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
        { "requeue", CPDMA_RX_STAT(requeue) },
        { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};

#define CPSW_STATS_COMMON_LEN   ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN       ARRAY_SIZE(cpsw_gstrings_ch_stats)

u32 cpsw_get_msglevel(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);

        return priv->msg_enable;
}

void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
        struct cpsw_priv *priv = netdev_priv(ndev);

        priv->msg_enable = value;
}

int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        coal->rx_coalesce_usecs = cpsw->coal_intvl;
        return 0;
}

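/* Program the interrupt pacing hardware from ethtool's rx_coalesce_usecs:
 * a value of 0 disables pacing, otherwise the interval is clamped to
 * [CPSW_CMINTMIN_INTVL, CPSW_CMINTMAX_INTVL] (optionally scaled by the
 * prescaler dilation below) and applied to both rx and tx directions.
 */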
int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        u32 int_ctrl;
        u32 num_interrupts = 0;
        u32 prescale = 0;
        u32 addnl_dvdr = 1;
        u32 coal_intvl = 0;
        struct cpsw_common *cpsw = priv->cpsw;

        coal_intvl = coal->rx_coalesce_usecs;

        int_ctrl = readl(&cpsw->wr_regs->int_control);
        prescale = cpsw->bus_freq_mhz * 4;

        if (!coal->rx_coalesce_usecs) {
                int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
                goto update_return;
        }

        if (coal_intvl < CPSW_CMINTMIN_INTVL)
                coal_intvl = CPSW_CMINTMIN_INTVL;

        if (coal_intvl > CPSW_CMINTMAX_INTVL) {
                /* The interrupt pacer works with a 4us pulse; we can
                 * throttle further by dilating that 4us pulse.
                 */
                addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;

                if (addnl_dvdr > 1) {
                        prescale *= addnl_dvdr;
                        if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
                                coal_intvl = (CPSW_CMINTMAX_INTVL
                                                * addnl_dvdr);
                } else {
                        addnl_dvdr = 1;
                        coal_intvl = CPSW_CMINTMAX_INTVL;
                }
        }

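        /* rx_imax/tx_imax hold the interrupt pacing limit: with coal_intvl
         * in usecs this works out to 1000 / coal_intvl interrupts per
         * millisecond, scaled by addnl_dvdr when the prescaler was dilated
         * above.
         */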
        num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
        writel(num_interrupts, &cpsw->wr_regs->rx_imax);
        writel(num_interrupts, &cpsw->wr_regs->tx_imax);

        int_ctrl |= CPSW_INTPACEEN;
        int_ctrl &= (~CPSW_INTPRESCALE_MASK);
        int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);

update_return:
        writel(int_ctrl, &cpsw->wr_regs->int_control);

        cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
        cpsw->coal_intvl = coal_intvl;

        return 0;
}

int cpsw_get_sset_count(struct net_device *ndev, int sset)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        switch (sset) {
        case ETH_SS_STATS:
                return (CPSW_STATS_COMMON_LEN +
                       (cpsw->rx_ch_num + cpsw->tx_ch_num) *
                       CPSW_STATS_CH_LEN);
        default:
                return -EOPNOTSUPP;
        }
}

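/* Emit one ethtool string per per-channel CPDMA stat for each of ch_num
 * channels, formatted "Rx DMA chan N: <stat>" or "Tx DMA chan N: <stat>"
 * depending on rx_dir.
 */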
static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
{
        int ch_stats_len;
        int line;
        int i;

        ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
        for (i = 0; i < ch_stats_len; i++) {
                line = i % CPSW_STATS_CH_LEN;
                snprintf(*p, ETH_GSTRING_LEN,
                         "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
                         (long)(i / CPSW_STATS_CH_LEN),
                         cpsw_gstrings_ch_stats[line].stat_string);
                *p += ETH_GSTRING_LEN;
        }
}

void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        u8 *p = data;
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
                        memcpy(p, cpsw_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }

                cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
                cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
                break;
        }
}

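/* ethtool -S layout: the common CPSW hardware stats come first, followed
 * by per-channel CPDMA stats for every rx channel and then every tx
 * channel, matching the string order built in cpsw_get_strings().
 */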
void cpsw_get_ethtool_stats(struct net_device *ndev,
                            struct ethtool_stats *stats, u64 *data)
{
        u8 *p;
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        struct cpdma_chan_stats ch_stats;
        int i, l, ch;

        /* Collect the CPSW hardware MIB statistics */
        for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
                data[l] = readl(cpsw->hw_stats +
                                cpsw_gstrings_stats[l].stat_offset);

        for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
                cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
                for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
                        p = (u8 *)&ch_stats +
                                cpsw_gstrings_ch_stats[i].stat_offset;
                        data[l] = *(u32 *)p;
                }
        }

        for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
                cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
                for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
                        p = (u8 *)&ch_stats +
                                cpsw_gstrings_ch_stats[i].stat_offset;
                        data[l] = *(u32 *)p;
                }
        }
}

void cpsw_get_pauseparam(struct net_device *ndev,
                         struct ethtool_pauseparam *pause)
{
        struct cpsw_priv *priv = netdev_priv(ndev);

        pause->autoneg = AUTONEG_DISABLE;
        pause->rx_pause = priv->rx_pause ? true : false;
        pause->tx_pause = priv->tx_pause ? true : false;
}

void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        wol->supported = 0;
        wol->wolopts = 0;

        if (cpsw->slaves[slave_no].phy)
                phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}

int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (cpsw->slaves[slave_no].phy)
                return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
        else
                return -EOPNOTSUPP;
}

int cpsw_get_regs_len(struct net_device *ndev)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
}

void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
{
        u32 *reg = p;
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        /* update CPSW IP version */
        regs->version = cpsw->version;

        cpsw_ale_dump(cpsw->ale, reg);
}

int cpsw_ethtool_op_begin(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int ret;

        ret = pm_runtime_get_sync(cpsw->dev);
        if (ret < 0) {
                cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
                pm_runtime_put_noidle(cpsw->dev);
        }

        return ret;
}

void cpsw_ethtool_op_complete(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        int ret;

        ret = pm_runtime_put(priv->cpsw->dev);
        if (ret < 0)
                cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}

void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
        ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
        ch->max_combined = 0;
        ch->max_other = 0;
        ch->other_count = 0;
        ch->rx_count = cpsw->rx_ch_num;
        ch->tx_count = cpsw->tx_ch_num;
        ch->combined_count = 0;
}

int cpsw_get_link_ksettings(struct net_device *ndev,
                            struct ethtool_link_ksettings *ecmd)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (!cpsw->slaves[slave_no].phy)
                return -EOPNOTSUPP;

        phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
        return 0;
}

int cpsw_set_link_ksettings(struct net_device *ndev,
                            const struct ethtool_link_ksettings *ecmd)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (!cpsw->slaves[slave_no].phy)
                return -EOPNOTSUPP;

        return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
}

int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (cpsw->slaves[slave_no].phy)
                return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
        else
                return -EOPNOTSUPP;
}

int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (cpsw->slaves[slave_no].phy)
                return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
        else
                return -EOPNOTSUPP;
}

int cpsw_nway_reset(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_no = cpsw_slave_index(cpsw, priv);

        if (cpsw->slaves[slave_no].phy)
                return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
        else
                return -EOPNOTSUPP;
}

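/* Quiesce the data path before changing channel/ring geometry: mask
 * interrupts (which stops NAPI scheduling), stop all tx queues on every
 * slave net_device, then halt the CPDMA controller so in-flight tx
 * packets are drained.
 */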
static void cpsw_suspend_data_pass(struct net_device *ndev)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        int i;

        /* Disable NAPI scheduling */
        cpsw_intr_disable(cpsw);

        /* Stop all transmit queues for every network device */
        for (i = 0; i < cpsw->data.slaves; i++) {
                ndev = cpsw->slaves[i].ndev;
                if (!(ndev && netif_running(ndev)))
                        continue;

                netif_tx_stop_all_queues(ndev);

                /* Barrier, so that stop_queue is visible to other CPUs */
                smp_mb__after_atomic();
        }

        /* Handle the rest of the tx packets and stop the cpdma channels */
        cpdma_ctlr_stop(cpsw->dma);
}

static int cpsw_resume_data_pass(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int i, ret;

        /* After this, receive is (re)started */
        if (cpsw->usage_count) {
                ret = cpsw_fill_rx_channels(priv);
                if (ret)
                        return ret;

                cpdma_ctlr_start(cpsw->dma);
                cpsw_intr_enable(cpsw);
        }

        /* Resume transmit for every affected interface */
        for (i = 0; i < cpsw->data.slaves; i++) {
                ndev = cpsw->slaves[i].ndev;
                if (ndev && netif_running(ndev))
                        netif_tx_start_all_queues(ndev);
        }

        return 0;
}

static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
                                  struct ethtool_channels *ch)
{
        if (cpsw->quirk_irq) {
                dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
                return -EOPNOTSUPP;
        }

        if (ch->combined_count)
                return -EINVAL;

        /* verify we have at least one channel in each direction */
        if (!ch->rx_count || !ch->tx_count)
                return -EINVAL;

        if (ch->rx_count > cpsw->data.channels ||
            ch->tx_count > cpsw->data.channels)
                return -EINVAL;

        return 0;
}

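/* Create or destroy CPDMA channels until the requested count is reached.
 * Tx channels are mapped to hardware channel numbers from the top down
 * (7 - index) while rx channels use their index directly.
 */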
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
                                    cpdma_handler_fn rx_handler)
{
        struct cpsw_common *cpsw = priv->cpsw;
        void (*handler)(void *, int, int);
        struct netdev_queue *queue;
        struct cpsw_vector *vec;
        int ret, *ch, vch;

        if (rx) {
                ch = &cpsw->rx_ch_num;
                vec = cpsw->rxv;
                handler = rx_handler;
        } else {
                ch = &cpsw->tx_ch_num;
                vec = cpsw->txv;
                handler = cpsw_tx_handler;
        }

        while (*ch < ch_num) {
                vch = rx ? *ch : 7 - *ch;
                vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
                queue = netdev_get_tx_queue(priv->ndev, *ch);
                queue->tx_maxrate = 0;

                if (IS_ERR(vec[*ch].ch))
                        return PTR_ERR(vec[*ch].ch);

                if (!vec[*ch].ch)
                        return -EINVAL;

                cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
                          (rx ? "rx" : "tx"));
                (*ch)++;
        }

        while (*ch > ch_num) {
                (*ch)--;

                ret = cpdma_chan_destroy(vec[*ch].ch);
                if (ret)
                        return ret;

                cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
                          (rx ? "rx" : "tx"));
        }

        return 0;
}

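/* Unrecoverable error while reconfiguring the data path: close every
 * slave net_device.
 */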
static void cpsw_fail(struct cpsw_common *cpsw)
{
        struct net_device *ndev;
        int i;

        for (i = 0; i < cpsw->data.slaves; i++) {
                ndev = cpsw->slaves[i].ndev;
                if (ndev)
                        dev_close(ndev);
        }
}

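/* Common ethtool set_channels handler: validate the request, suspend the
 * data path, resize the rx/tx channel sets, tell the stack the new queue
 * counts, rebalance channel budgets (cpsw_split_res()), recreate the XDP
 * rx queues if the rx channel count changed while the hardware is in use,
 * and finally resume the data path. Any failure closes the devices via
 * cpsw_fail().
 */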
int cpsw_set_channels_common(struct net_device *ndev,
                             struct ethtool_channels *chs,
                             cpdma_handler_fn rx_handler)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        struct net_device *sl_ndev;
        int i, new_pools, ret;

        ret = cpsw_check_ch_settings(cpsw, chs);
        if (ret < 0)
                return ret;

        cpsw_suspend_data_pass(ndev);

        new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;

        ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
        if (ret)
                goto err;

        ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler);
        if (ret)
                goto err;

        for (i = 0; i < cpsw->data.slaves; i++) {
                sl_ndev = cpsw->slaves[i].ndev;
                if (!(sl_ndev && netif_running(sl_ndev)))
                        continue;

                /* Inform stack about new count of queues */
                ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num);
                if (ret) {
                        dev_err(priv->dev, "cannot set real number of tx queues\n");
                        goto err;
                }

                ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num);
                if (ret) {
                        dev_err(priv->dev, "cannot set real number of rx queues\n");
                        goto err;
                }
        }

        cpsw_split_res(cpsw);

        if (new_pools) {
                cpsw_destroy_xdp_rxqs(cpsw);
                ret = cpsw_create_xdp_rxqs(cpsw);
                if (ret)
                        goto err;
        }

        ret = cpsw_resume_data_pass(ndev);
        if (!ret)
                return 0;
err:
        dev_err(priv->dev, "cannot update channels number, closing device\n");
        cpsw_fail(cpsw);
        return ret;
}

void cpsw_get_ringparam(struct net_device *ndev,
                        struct ethtool_ringparam *ering)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;

        /* tx ring size is not adjustable (cpsw_set_ringparam() ignores
         * tx_pending); report the current values only
         */
        ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
        ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
        ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
        ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
}

int cpsw_set_ringparam(struct net_device *ndev,
                       struct ethtool_ringparam *ering)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        int descs_num, ret;

        /* ignore ering->tx_pending - only rx_pending adjustment is supported */

        if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
            ering->rx_pending < CPSW_MAX_QUEUES ||
            ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
                return -EINVAL;

        descs_num = cpdma_get_num_rx_descs(cpsw->dma);
        if (ering->rx_pending == descs_num)
                return 0;

        cpsw_suspend_data_pass(ndev);

        ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
        if (ret) {
                if (cpsw_resume_data_pass(ndev))
                        goto err;

                return ret;
        }

        if (cpsw->usage_count) {
                cpsw_destroy_xdp_rxqs(cpsw);
                ret = cpsw_create_xdp_rxqs(cpsw);
                if (ret)
                        goto err;
        }

        ret = cpsw_resume_data_pass(ndev);
        if (!ret)
                return 0;
err:
        cpdma_set_num_rx_descs(cpsw->dma, descs_num);
        dev_err(cpsw->dev, "cannot set ring params, closing device\n");
        cpsw_fail(cpsw);
        return ret;
}

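/* Timestamping capabilities: when CPTS support is built in, advertise
 * hardware timestamping through the CPTS PHC; otherwise report software
 * timestamping only.
 */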
#if IS_ENABLED(CONFIG_TI_CPTS)
int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

        info->so_timestamping =
                SOF_TIMESTAMPING_TX_HARDWARE |
                SOF_TIMESTAMPING_TX_SOFTWARE |
                SOF_TIMESTAMPING_RX_HARDWARE |
                SOF_TIMESTAMPING_RX_SOFTWARE |
                SOF_TIMESTAMPING_SOFTWARE |
                SOF_TIMESTAMPING_RAW_HARDWARE;
        info->phc_index = cpsw->cpts->phc_index;
        info->tx_types =
                (1 << HWTSTAMP_TX_OFF) |
                (1 << HWTSTAMP_TX_ON);
        info->rx_filters =
                (1 << HWTSTAMP_FILTER_NONE) |
                (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
                (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
        return 0;
}
#else
int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
{
        info->so_timestamping =
                SOF_TIMESTAMPING_TX_SOFTWARE |
                SOF_TIMESTAMPING_RX_SOFTWARE |
                SOF_TIMESTAMPING_SOFTWARE;
        info->phc_index = -1;
        info->tx_types = 0;
        info->rx_filters = 0;
        return 0;
}
#endif