drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 */

#include <linux/net_tstamp.h>
#include <linux/nospec.h>

#include "dpni.h"       /* DPNI_LINK_OPT_* */
#include "dpaa2-eth.h"

/* To be kept in sync with DPNI statistics */
static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
        "[hw] rx frames",
        "[hw] rx bytes",
        "[hw] rx mcast frames",
        "[hw] rx mcast bytes",
        "[hw] rx bcast frames",
        "[hw] rx bcast bytes",
        "[hw] tx frames",
        "[hw] tx bytes",
        "[hw] tx mcast frames",
        "[hw] tx mcast bytes",
        "[hw] tx bcast frames",
        "[hw] tx bcast bytes",
        "[hw] rx filtered frames",
        "[hw] rx discarded frames",
        "[hw] rx nobuffer discards",
        "[hw] tx discarded frames",
        "[hw] tx confirmed frames",
        "[hw] tx dequeued bytes",
        "[hw] tx dequeued frames",
        "[hw] tx rejected bytes",
        "[hw] tx rejected frames",
        "[hw] tx pending frames",
};

#define DPAA2_ETH_NUM_STATS     ARRAY_SIZE(dpaa2_ethtool_stats)

static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
        /* per-cpu stats */
        "[drv] tx conf frames",
        "[drv] tx conf bytes",
        "[drv] tx sg frames",
        "[drv] tx sg bytes",
        "[drv] tx realloc frames",
        "[drv] rx sg frames",
        "[drv] rx sg bytes",
        "[drv] enqueue portal busy",
        /* Channel stats */
        "[drv] dequeue portal busy",
        "[drv] channel pull errors",
        "[drv] cdan",
        "[drv] xdp drop",
        "[drv] xdp tx",
        "[drv] xdp tx errors",
        "[drv] xdp redirect",
        /* FQ stats */
        "[qbman] rx pending frames",
        "[qbman] rx pending bytes",
        "[qbman] tx conf pending frames",
        "[qbman] tx conf pending bytes",
        "[qbman] buffer count",
};

#define DPAA2_ETH_NUM_EXTRA_STATS       ARRAY_SIZE(dpaa2_ethtool_extras)

static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

        strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);

        strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
                sizeof(drvinfo->bus_info));
}

static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

        if (priv->mac)
                return phylink_ethtool_nway_reset(priv->mac->phylink);

        return -EOPNOTSUPP;
}

static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
                             struct ethtool_link_ksettings *link_settings)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

        if (priv->mac)
                return phylink_ethtool_ksettings_get(priv->mac->phylink,
                                                     link_settings);

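        /* No DPMAC attached: report the link parameters last read from the
         * DPNI (MC firmware); autonegotiation is not available in this case.
         */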
        link_settings->base.autoneg = AUTONEG_DISABLE;
        if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
                link_settings->base.duplex = DUPLEX_FULL;
        link_settings->base.speed = priv->link_state.rate;

        return 0;
}

static int
dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
                             const struct ethtool_link_ksettings *link_settings)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

        if (!priv->mac)
                return -EOPNOTSUPP;

        return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
}

static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
                                     struct ethtool_pauseparam *pause)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        u64 link_options = priv->link_state.options;

        if (priv->mac) {
                phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
                return;
        }

        pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
        pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
        pause->autoneg = AUTONEG_DISABLE;
}

static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
                                    struct ethtool_pauseparam *pause)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        struct dpni_link_cfg cfg = {0};
        int err;

        if (!dpaa2_eth_has_pause_support(priv)) {
                netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
                            DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
                return -EOPNOTSUPP;
        }

        if (priv->mac)
                return phylink_ethtool_set_pauseparam(priv->mac->phylink,
                                                      pause);
        if (pause->autoneg)
                return -EOPNOTSUPP;

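        /* The firmware encodes pause settings in the link options:
         * DPNI_LINK_OPT_PAUSE reflects rx pause, while tx pause is considered
         * enabled when exactly one of PAUSE/ASYM_PAUSE is set (see
         * dpaa2_eth_tx_pause_enabled()). Translate the ethtool request into
         * that encoding below.
         */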
        cfg.rate = priv->link_state.rate;
        cfg.options = priv->link_state.options;
        if (pause->rx_pause)
                cfg.options |= DPNI_LINK_OPT_PAUSE;
        else
                cfg.options &= ~DPNI_LINK_OPT_PAUSE;
        if (!!pause->rx_pause ^ !!pause->tx_pause)
                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
        else
                cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;

        if (cfg.options == priv->link_state.options)
                return 0;

        err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
        if (err) {
                netdev_err(net_dev, "dpni_set_link_cfg() failed\n");
                return err;
        }

        priv->link_state.options = cfg.options;

        return 0;
}

static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
                                  u8 *data)
{
        struct dpaa2_eth_priv *priv = netdev_priv(netdev);
        u8 *p = data;
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
                        strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
                        strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (priv->mac)
                        dpaa2_mac_get_strings(p);
                break;
        }
}

static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
        int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

        switch (sset) {
        case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
                if (priv->mac)
                        num_ss_stats += dpaa2_mac_get_sset_count();
                return num_ss_stats;
        default:
                return -EOPNOTSUPP;
        }
}

/* Fill in DPNI hardware counters, driver per-cpu and per-channel extras,
 * and QBMan queue/buffer counts, in the same order as the string arrays
 * above.
 */
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
                                        struct ethtool_stats *stats,
                                        u64 *data)
{
        int i = 0;
        int j, k, err;
        int num_cnt;
        union dpni_statistics dpni_stats;
        u32 fcnt, bcnt;
        u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
        u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
        u32 buf_cnt;
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        struct dpaa2_eth_drv_stats *extras;
        struct dpaa2_eth_ch_stats *ch_stats;
        int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
                sizeof(dpni_stats.page_0),
                sizeof(dpni_stats.page_1),
                sizeof(dpni_stats.page_2),
                sizeof(dpni_stats.page_3),
                sizeof(dpni_stats.page_4),
                sizeof(dpni_stats.page_5),
                sizeof(dpni_stats.page_6),
        };

        memset(data, 0,
               sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

        /* Print standard counters, from DPNI statistics */
        for (j = 0; j <= 6; j++) {
                /* We're not interested in pages 4 & 5 for now */
                if (j == 4 || j == 5)
                        continue;
                err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
                                          j, &dpni_stats);
                if (err == -EINVAL)
                        /* Older firmware versions don't support all pages */
                        memset(&dpni_stats, 0, sizeof(dpni_stats));
                else if (err)
                        netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);

                num_cnt = dpni_stats_page_size[j] / sizeof(u64);
                for (k = 0; k < num_cnt; k++)
                        *(data + i++) = dpni_stats.raw.counter[k];
        }

        /* Print per-cpu extra stats */
        for_each_online_cpu(k) {
                extras = per_cpu_ptr(priv->percpu_extras, k);
                for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
                        *((__u64 *)data + i + j) += *((__u64 *)extras + j);
        }
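        /* At this point j equals the number of u64 fields in
         * struct dpaa2_eth_drv_stats, so advance the output index past the
         * per-cpu extras just accumulated.
         */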
        i += j;

        /* Per-channel stats */
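        /* The inner loop stops one u64 short of the full struct size, so the
         * last field of struct dpaa2_eth_ch_stats is not reported here.
         */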
        for (k = 0; k < priv->num_channels; k++) {
                ch_stats = &priv->channel[k]->stats;
                for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
                        *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
        }
        i += j;

        for (j = 0; j < priv->num_fqs; j++) {
                /* Print FQ instantaneous counts */
                err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
                                              &fcnt, &bcnt);
                if (err) {
                        netdev_warn(net_dev, "FQ query error %d\n", err);
                        return;
                }

                if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
                        fcnt_tx_total += fcnt;
                        bcnt_tx_total += bcnt;
                } else {
                        fcnt_rx_total += fcnt;
                        bcnt_rx_total += bcnt;
                }
        }

        *(data + i++) = fcnt_rx_total;
        *(data + i++) = bcnt_rx_total;
        *(data + i++) = fcnt_tx_total;
        *(data + i++) = bcnt_tx_total;

        err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
        if (err) {
                netdev_warn(net_dev, "Buffer count query error %d\n", err);
                return;
        }
        *(data + i++) = buf_cnt;

        if (priv->mac)
                dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}

static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
                         void *key, void *mask, u64 *fields)
{
        int off;

        if (eth_mask->h_proto) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
                *(__be16 *)(key + off) = eth_value->h_proto;
                *(__be16 *)(mask + off) = eth_mask->h_proto;
                *fields |= DPAA2_ETH_DIST_ETHTYPE;
        }

        if (!is_zero_ether_addr(eth_mask->h_source)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
                ether_addr_copy(key + off, eth_value->h_source);
                ether_addr_copy(mask + off, eth_mask->h_source);
                *fields |= DPAA2_ETH_DIST_ETHSRC;
        }

        if (!is_zero_ether_addr(eth_mask->h_dest)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
                ether_addr_copy(key + off, eth_value->h_dest);
                ether_addr_copy(mask + off, eth_mask->h_dest);
                *fields |= DPAA2_ETH_DIST_ETHDST;
        }

        return 0;
}

static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
                         struct ethtool_usrip4_spec *uip_mask,
                         void *key, void *mask, u64 *fields)
{
        int off;
        u32 tmp_value, tmp_mask;

        if (uip_mask->tos || uip_mask->ip_ver)
                return -EOPNOTSUPP;

        if (uip_mask->ip4src) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
                *(__be32 *)(key + off) = uip_value->ip4src;
                *(__be32 *)(mask + off) = uip_mask->ip4src;
                *fields |= DPAA2_ETH_DIST_IPSRC;
        }

        if (uip_mask->ip4dst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
                *(__be32 *)(key + off) = uip_value->ip4dst;
                *(__be32 *)(mask + off) = uip_mask->ip4dst;
                *fields |= DPAA2_ETH_DIST_IPDST;
        }

        if (uip_mask->proto) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
                *(u8 *)(key + off) = uip_value->proto;
                *(u8 *)(mask + off) = uip_mask->proto;
                *fields |= DPAA2_ETH_DIST_IPPROTO;
        }

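        /* l4_4_bytes covers the first four bytes of the L4 header; for
         * TCP/UDP these are the source port (upper 16 bits) followed by the
         * destination port (lower 16 bits), which is how they are split
         * below.
         */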
        if (uip_mask->l4_4_bytes) {
                tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
                tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);

                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
                *(__be16 *)(key + off) = htons(tmp_value >> 16);
                *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
                *fields |= DPAA2_ETH_DIST_L4SRC;

                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
                *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
                *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
                *fields |= DPAA2_ETH_DIST_L4DST;
        }

        /* Only apply the rule for IPv4 frames */
        off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
        *(__be16 *)(key + off) = htons(ETH_P_IP);
        *(__be16 *)(mask + off) = htons(0xFFFF);
        *fields |= DPAA2_ETH_DIST_ETHTYPE;

        return 0;
}

static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
                        struct ethtool_tcpip4_spec *l4_mask,
                        void *key, void *mask, u8 l4_proto, u64 *fields)
{
        int off;

        if (l4_mask->tos)
                return -EOPNOTSUPP;

        if (l4_mask->ip4src) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
                *(__be32 *)(key + off) = l4_value->ip4src;
                *(__be32 *)(mask + off) = l4_mask->ip4src;
                *fields |= DPAA2_ETH_DIST_IPSRC;
        }

        if (l4_mask->ip4dst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
                *(__be32 *)(key + off) = l4_value->ip4dst;
                *(__be32 *)(mask + off) = l4_mask->ip4dst;
                *fields |= DPAA2_ETH_DIST_IPDST;
        }

        if (l4_mask->psrc) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
                *(__be16 *)(key + off) = l4_value->psrc;
                *(__be16 *)(mask + off) = l4_mask->psrc;
                *fields |= DPAA2_ETH_DIST_L4SRC;
        }

        if (l4_mask->pdst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
                *(__be16 *)(key + off) = l4_value->pdst;
                *(__be16 *)(mask + off) = l4_mask->pdst;
                *fields |= DPAA2_ETH_DIST_L4DST;
        }

        /* Only apply the rule for IPv4 frames with the specified L4 proto */
        off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
        *(__be16 *)(key + off) = htons(ETH_P_IP);
        *(__be16 *)(mask + off) = htons(0xFFFF);
        *fields |= DPAA2_ETH_DIST_ETHTYPE;

        off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
        *(u8 *)(key + off) = l4_proto;
        *(u8 *)(mask + off) = 0xFF;
        *fields |= DPAA2_ETH_DIST_IPPROTO;

        return 0;
}

static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
                         struct ethtool_flow_ext *ext_mask,
                         void *key, void *mask, u64 *fields)
{
        int off;

        if (ext_mask->vlan_etype)
                return -EOPNOTSUPP;

        if (ext_mask->vlan_tci) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
                *(__be16 *)(key + off) = ext_value->vlan_tci;
                *(__be16 *)(mask + off) = ext_mask->vlan_tci;
                *fields |= DPAA2_ETH_DIST_VLAN;
        }

        return 0;
}

static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
                             struct ethtool_flow_ext *ext_mask,
                             void *key, void *mask, u64 *fields)
{
        int off;

        if (!is_zero_ether_addr(ext_mask->h_dest)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
                ether_addr_copy(key + off, ext_value->h_dest);
                ether_addr_copy(mask + off, ext_mask->h_dest);
                *fields |= DPAA2_ETH_DIST_ETHDST;
        }

        return 0;
}

static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
                         u64 *fields)
{
        int err;

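        /* The high bits of flow_type hold the FLOW_EXT/FLOW_MAC_EXT flags,
         * handled further down; mask them off here to get the base flow type.
         */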
        switch (fs->flow_type & 0xFF) {
        case ETHER_FLOW:
                err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
                                    key, mask, fields);
                break;
        case IP_USER_FLOW:
                err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
                                    &fs->m_u.usr_ip4_spec, key, mask, fields);
                break;
        case TCP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
                                   key, mask, IPPROTO_TCP, fields);
                break;
        case UDP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
                                   key, mask, IPPROTO_UDP, fields);
                break;
        case SCTP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
                                   &fs->m_u.sctp_ip4_spec, key, mask,
                                   IPPROTO_SCTP, fields);
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (err)
                return err;

        if (fs->flow_type & FLOW_EXT) {
                err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
                if (err)
                        return err;
        }

        if (fs->flow_type & FLOW_MAC_EXT) {
                err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
                                        fields);
                if (err)
                        return err;
        }

        return 0;
}

static int do_cls_rule(struct net_device *net_dev,
                       struct ethtool_rx_flow_spec *fs,
                       bool add)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        struct device *dev = net_dev->dev.parent;
        struct dpni_rule_cfg rule_cfg = { 0 };
        struct dpni_fs_action_cfg fs_act = { 0 };
        dma_addr_t key_iova;
        u64 fields = 0;
        void *key_buf;
        int i, err;

        if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
            fs->ring_cookie >= dpaa2_eth_queue_count(priv))
                return -EINVAL;

        rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);

        /* allocate twice the key size, for the actual key and for mask */
        key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
        if (!key_buf)
                return -ENOMEM;

        /* Fill the key and mask memory areas */
        err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
        if (err)
                goto free_mem;

        if (!dpaa2_eth_fs_mask_enabled(priv)) {
                /* Masking allows us to configure a maximal key during init and
                 * use it for all flow steering rules. Without it, we include
                 * in the key only the fields actually used, so we need to
                 * extract the others from the final key buffer.
                 *
                 * Program the FS key if needed, or return error if previously
                 * set key can't be used for the current rule. User needs to
                 * delete existing rules in this case to allow for the new one.
                 */
                if (!priv->rx_cls_fields) {
                        err = dpaa2_eth_set_cls(net_dev, fields);
                        if (err)
                                goto free_mem;

                        priv->rx_cls_fields = fields;
                } else if (priv->rx_cls_fields != fields) {
                        netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
                        err = -EOPNOTSUPP;
                        goto free_mem;
                }

                dpaa2_eth_cls_trim_rule(key_buf, fields);
                rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
        }

        key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(dev, key_iova)) {
                err = -ENOMEM;
                goto free_mem;
        }

        rule_cfg.key_iova = key_iova;
        if (dpaa2_eth_fs_mask_enabled(priv))
                rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

        if (add) {
                if (fs->ring_cookie == RX_CLS_FLOW_DISC)
                        fs_act.options |= DPNI_FS_OPT_DISCARD;
                else
                        fs_act.flow_id = fs->ring_cookie;
        }
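        /* ethtool flow rules are not tied to a specific traffic class, so
         * install (or remove) the same entry in each traffic class.
         */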
        for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
                if (add)
                        err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
                                                i, fs->location, &rule_cfg,
                                                &fs_act);
                else
                        err = dpni_remove_fs_entry(priv->mc_io, 0,
                                                   priv->mc_token, i,
                                                   &rule_cfg);
                if (err)
                        break;
        }

        dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
        kfree(key_buf);

        return err;
}

static int num_rules(struct dpaa2_eth_priv *priv)
{
        int i, rules = 0;

        for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
                if (priv->cls_rules[i].in_use)
                        rules++;

        return rules;
}

static int update_cls_rule(struct net_device *net_dev,
                           struct ethtool_rx_flow_spec *new_fs,
                           unsigned int location)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        struct dpaa2_eth_cls_rule *rule;
        int err = -EINVAL;

        if (!priv->rx_cls_enabled)
                return -EOPNOTSUPP;

        if (location >= dpaa2_eth_fs_count(priv))
                return -EINVAL;

        rule = &priv->cls_rules[location];

        /* If a rule is present at the specified location, delete it. */
        if (rule->in_use) {
                err = do_cls_rule(net_dev, &rule->fs, false);
                if (err)
                        return err;

                rule->in_use = 0;

                if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
                        priv->rx_cls_fields = 0;
        }

        /* If no new entry to add, return here */
        if (!new_fs)
                return err;

        err = do_cls_rule(net_dev, new_fs, true);
        if (err)
                return err;

        rule->in_use = 1;
        rule->fs = *new_fs;

        return 0;
}

static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
                               struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        int max_rules = dpaa2_eth_fs_count(priv);
        int i, j = 0;

        switch (rxnfc->cmd) {
        case ETHTOOL_GRXFH:
                /* we purposely ignore cmd->flow_type for now, because the
                 * classifier only supports a single set of fields for all
                 * protocols
                 */
                rxnfc->data = priv->rx_hash_fields;
                break;
        case ETHTOOL_GRXRINGS:
                rxnfc->data = dpaa2_eth_queue_count(priv);
                break;
        case ETHTOOL_GRXCLSRLCNT:
                rxnfc->rule_cnt = 0;
                rxnfc->rule_cnt = num_rules(priv);
                rxnfc->data = max_rules;
                break;
        case ETHTOOL_GRXCLSRULE:
                if (rxnfc->fs.location >= max_rules)
                        return -EINVAL;
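                /* Sanitize the index under speculation so a mispredicted
                 * bounds check cannot leak data past cls_rules[] (Spectre v1).
                 */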
                rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
                                                        max_rules);
                if (!priv->cls_rules[rxnfc->fs.location].in_use)
                        return -EINVAL;
                rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
                break;
        case ETHTOOL_GRXCLSRLALL:
                for (i = 0; i < max_rules; i++) {
                        if (!priv->cls_rules[i].in_use)
                                continue;
                        if (j == rxnfc->rule_cnt)
                                return -EMSGSIZE;
                        rule_locs[j++] = i;
                }
                rxnfc->rule_cnt = j;
                rxnfc->data = max_rules;
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
                               struct ethtool_rxnfc *rxnfc)
{
        int err = 0;

        switch (rxnfc->cmd) {
        case ETHTOOL_SRXFH:
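                /* Only the hash input fields advertised in
                 * DPAA2_RXH_SUPPORTED can be configured; reject anything else.
                 */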
                if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
                        return -EOPNOTSUPP;
                err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
                break;
        case ETHTOOL_SRXCLSRLINS:
                err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

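/* Index of the DPAA2 PTP hardware clock; stays -1 (none) until the DPAA2 PTP
 * driver registers its clock and updates it.
 */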
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);

static int dpaa2_eth_get_ts_info(struct net_device *dev,
                                 struct ethtool_ts_info *info)
{
        info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
                                SOF_TIMESTAMPING_RX_HARDWARE |
                                SOF_TIMESTAMPING_RAW_HARDWARE;

        info->phc_index = dpaa2_phc_index;

        info->tx_types = (1 << HWTSTAMP_TX_OFF) |
                         (1 << HWTSTAMP_TX_ON);

        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
                           (1 << HWTSTAMP_FILTER_ALL);
        return 0;
}

const struct ethtool_ops dpaa2_ethtool_ops = {
        .get_drvinfo = dpaa2_eth_get_drvinfo,
        .nway_reset = dpaa2_eth_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_link_ksettings = dpaa2_eth_get_link_ksettings,
        .set_link_ksettings = dpaa2_eth_set_link_ksettings,
        .get_pauseparam = dpaa2_eth_get_pauseparam,
        .set_pauseparam = dpaa2_eth_set_pauseparam,
        .get_sset_count = dpaa2_eth_get_sset_count,
        .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
        .get_strings = dpaa2_eth_get_strings,
        .get_rxnfc = dpaa2_eth_get_rxnfc,
        .set_rxnfc = dpaa2_eth_set_rxnfc,
        .get_ts_info = dpaa2_eth_get_ts_info,
};