2 * Copyright 2015 Amazon.com, Inc. or its affiliates.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/pci.h>
35 #include "ena_netdev.h"
/* NOTE(review): this view of the file is elided -- brace/blank lines and
 * some statements are missing throughout (the leading integers are the
 * original file's line numbers). Comments describe only what the visible
 * lines establish.
 */
/* Human-readable stat name of a stat descriptor (struct ena_stats). */
38 char name[ETH_GSTRING_LEN];
/* Descriptor for an admin-queue stat: stat_offset is the u64-element
 * index of @stat inside struct ena_com_stats_admin.
 */
42 #define ENA_STAT_ENA_COM_ENTRY(stat) { \
44 .stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \
/* Generic descriptor: u64-element index of @stat inside
 * struct ena_stats_<stat_type>.
 */
47 #define ENA_STAT_ENTRY(stat, stat_type) { \
49 .stat_offset = offsetof(struct ena_stats_##stat_type, stat) / sizeof(u64) \
/* Convenience wrappers selecting the per-ring rx/tx or global (dev) group. */
52 #define ENA_STAT_RX_ENTRY(stat) \
53 ENA_STAT_ENTRY(stat, rx)
55 #define ENA_STAT_TX_ENTRY(stat) \
56 ENA_STAT_ENTRY(stat, tx)
58 #define ENA_STAT_GLOBAL_ENTRY(stat) \
59 ENA_STAT_ENTRY(stat, dev)
/* Device-global counters exported via `ethtool -S`; offsets resolve into
 * adapter->dev_stats. Order here defines the reporting order.
 * NOTE(review): closing brace of the array is elided in this view.
 */
61 static const struct ena_stats ena_stats_global_strings[] = {
62 ENA_STAT_GLOBAL_ENTRY(tx_timeout),
63 ENA_STAT_GLOBAL_ENTRY(suspend),
64 ENA_STAT_GLOBAL_ENTRY(resume),
65 ENA_STAT_GLOBAL_ENTRY(wd_expired),
66 ENA_STAT_GLOBAL_ENTRY(interface_up),
67 ENA_STAT_GLOBAL_ENTRY(interface_down),
68 ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
/* Per-TX-ring counters (offsets into struct ena_stats_tx); reported once
 * per IO queue as "queue_<i>_tx_<name>".
 */
71 static const struct ena_stats ena_stats_tx_strings[] = {
72 ENA_STAT_TX_ENTRY(cnt),
73 ENA_STAT_TX_ENTRY(bytes),
74 ENA_STAT_TX_ENTRY(queue_stop),
75 ENA_STAT_TX_ENTRY(queue_wakeup),
76 ENA_STAT_TX_ENTRY(dma_mapping_err),
77 ENA_STAT_TX_ENTRY(linearize),
78 ENA_STAT_TX_ENTRY(linearize_failed),
79 ENA_STAT_TX_ENTRY(napi_comp),
80 ENA_STAT_TX_ENTRY(tx_poll),
81 ENA_STAT_TX_ENTRY(doorbells),
82 ENA_STAT_TX_ENTRY(prepare_ctx_err),
83 ENA_STAT_TX_ENTRY(bad_req_id),
84 ENA_STAT_TX_ENTRY(llq_buffer_copy),
85 ENA_STAT_TX_ENTRY(missed_tx),
86 ENA_STAT_TX_ENTRY(unmask_interrupt),
/* Per-RX-ring counters (offsets into struct ena_stats_rx); reported once
 * per IO queue as "queue_<i>_rx_<name>".
 */
89 static const struct ena_stats ena_stats_rx_strings[] = {
90 ENA_STAT_RX_ENTRY(cnt),
91 ENA_STAT_RX_ENTRY(bytes),
92 ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
93 ENA_STAT_RX_ENTRY(csum_good),
94 ENA_STAT_RX_ENTRY(refil_partial),
95 ENA_STAT_RX_ENTRY(bad_csum),
96 ENA_STAT_RX_ENTRY(page_alloc_fail),
97 ENA_STAT_RX_ENTRY(skb_alloc_fail),
98 ENA_STAT_RX_ENTRY(dma_mapping_err),
99 ENA_STAT_RX_ENTRY(bad_desc_num),
100 ENA_STAT_RX_ENTRY(bad_req_id),
101 ENA_STAT_RX_ENTRY(empty_rx_ring),
102 ENA_STAT_RX_ENTRY(csum_unchecked),
/* Admin-queue command counters (offsets into struct ena_com_stats_admin);
 * reported as "ena_admin_q_<name>".
 */
105 static const struct ena_stats ena_stats_ena_com_strings[] = {
106 ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
107 ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
108 ENA_STAT_ENA_COM_ENTRY(completed_cmd),
109 ENA_STAT_ENA_COM_ENTRY(out_of_space),
110 ENA_STAT_ENA_COM_ENTRY(no_completion),
/* Entry counts of each stat group; used by the sset count and the
 * string/value emission loops below (the two must stay in lockstep).
 */
113 #define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings)
114 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
115 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
116 #define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
/* Copy one u64 counter from @src to @dst consistently: retry the read
 * while the writer-side u64_stats_sync seqcount @syncp changes (needed
 * for tear-free reads of 64-bit stats on 32-bit hosts).
 * NOTE(review): the `do {` opener and the actual copy statement are
 * elided in this view.
 */
118 static void ena_safe_update_stat(u64 *src, u64 *dst,
119 struct u64_stats_sync *syncp)
124 start = u64_stats_fetch_begin_irq(syncp);
126 } while (u64_stats_fetch_retry_irq(syncp, start));
/* Append the per-queue stats to the caller's output cursor *data,
 * advancing it: for each IO queue, first all TX-ring counters, then all
 * RX-ring counters. Each value is read through ena_safe_update_stat()
 * under the owning ring's syncp. Must match ena_queue_strings() order.
 */
129 static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
131 const struct ena_stats *ena_stats;
132 struct ena_ring *ring;
137 for (i = 0; i < adapter->num_io_queues; i++) {
139 ring = &adapter->tx_ring[i];
141 for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
142 ena_stats = &ena_stats_tx_strings[j];
/* stat_offset is in u64 units, so plain pointer arithmetic on a
 * (u64 *) view of the stats struct lands on the right counter.
 */
144 ptr = (u64 *)&ring->tx_stats + ena_stats->stat_offset;
146 ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
150 ring = &adapter->rx_ring[i];
152 for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
153 ena_stats = &ena_stats_rx_strings[j];
155 ptr = (u64 *)&ring->rx_stats +
156 ena_stats->stat_offset;
158 ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
/* Append the admin-queue (ena_com) counters to *data, advancing the
 * cursor. Must match ena_com_dev_strings() order.
 * NOTE(review): the per-entry copy/store statement is elided in this view.
 */
163 static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
165 const struct ena_stats *ena_stats;
169 for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
170 ena_stats = &ena_stats_ena_com_strings[i];
172 ptr = (u64 *)&adapter->ena_dev->admin_queue.stats +
173 ena_stats->stat_offset;
/* ethtool .get_ethtool_stats callback: fill @data with the global stats,
 * then the per-queue stats, then the admin-queue stats -- the same order
 * ena_get_strings() emits names in.
 */
179 static void ena_get_ethtool_stats(struct net_device *netdev,
180 struct ethtool_stats *stats,
183 struct ena_adapter *adapter = netdev_priv(netdev);
184 const struct ena_stats *ena_stats;
188 for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
189 ena_stats = &ena_stats_global_strings[i];
191 ptr = (u64 *)&adapter->dev_stats + ena_stats->stat_offset;
/* Global counters are guarded by the adapter-level syncp. */
193 ena_safe_update_stat(ptr, data++, &adapter->syncp);
196 ena_queue_stats(adapter, &data);
197 ena_dev_admin_queue_stats(adapter, &data);
/* ethtool .get_sset_count: total number of stat strings for ETH_SS_STATS,
 * i.e. (TX + RX groups) per IO queue, plus the global and admin-queue
 * groups. NOTE(review): the return value for sset != ETH_SS_STATS is on
 * a line elided from this view.
 */
200 int ena_get_sset_count(struct net_device *netdev, int sset)
202 struct ena_adapter *adapter = netdev_priv(netdev);
204 if (sset != ETH_SS_STATS)
207 return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
208 + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
/* Emit "queue_<i>_tx_<name>" then "queue_<i>_rx_<name>" labels for every
 * IO queue into *data (one ETH_GSTRING_LEN slot each), advancing the
 * cursor. Ordering must mirror ena_queue_stats().
 */
211 static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
213 const struct ena_stats *ena_stats;
216 for (i = 0; i < adapter->num_io_queues; i++) {
218 for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
219 ena_stats = &ena_stats_tx_strings[j];
221 snprintf(*data, ETH_GSTRING_LEN,
222 "queue_%u_tx_%s", i, ena_stats->name);
223 (*data) += ETH_GSTRING_LEN;
226 for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
227 ena_stats = &ena_stats_rx_strings[j];
229 snprintf(*data, ETH_GSTRING_LEN,
230 "queue_%u_rx_%s", i, ena_stats->name);
231 (*data) += ETH_GSTRING_LEN;
/* Emit "ena_admin_q_<name>" labels for the admin-queue stat group into
 * *data, advancing the cursor. Ordering must mirror
 * ena_dev_admin_queue_stats().
 */
236 static void ena_com_dev_strings(u8 **data)
238 const struct ena_stats *ena_stats;
241 for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
242 ena_stats = &ena_stats_ena_com_strings[i];
244 snprintf(*data, ETH_GSTRING_LEN,
245 "ena_admin_q_%s", ena_stats->name);
246 (*data) += ETH_GSTRING_LEN;
/* ethtool .get_strings: write all stat names for ETH_SS_STATS -- global
 * names first, then per-queue, then admin-queue -- matching the value
 * order of ena_get_ethtool_stats(). Returns silently for other ssets
 * (the bare return after the if is elided in this view).
 */
250 static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
252 struct ena_adapter *adapter = netdev_priv(netdev);
253 const struct ena_stats *ena_stats;
256 if (sset != ETH_SS_STATS)
259 for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
260 ena_stats = &ena_stats_global_strings[i];
/* Global names are copied verbatim (no queue prefix). */
261 memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
262 data += ETH_GSTRING_LEN;
265 ena_queue_strings(adapter, &data);
266 ena_com_dev_strings(&data);
/* ethtool .get_link_ksettings: query link parameters from the device via
 * the admin queue and report speed, autoneg (driven by the AUTONEG flag
 * bit) and full duplex. NOTE(review): the error-return after
 * ena_com_get_link_params() and the link-mode names passed to
 * ethtool_link_ksettings_add_link_mode() are elided in this view.
 */
269 static int ena_get_link_ksettings(struct net_device *netdev,
270 struct ethtool_link_ksettings *link_ksettings)
272 struct ena_adapter *adapter = netdev_priv(netdev);
273 struct ena_com_dev *ena_dev = adapter->ena_dev;
274 struct ena_admin_get_feature_link_desc *link;
275 struct ena_admin_get_feat_resp feat_resp;
278 rc = ena_com_get_link_params(ena_dev, &feat_resp);
282 link = &feat_resp.u.link;
283 link_ksettings->base.speed = link->speed;
285 if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
286 ethtool_link_ksettings_add_link_mode(link_ksettings,
288 ethtool_link_ksettings_add_link_mode(link_ksettings,
292 link_ksettings->base.autoneg =
293 (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
294 AUTONEG_ENABLE : AUTONEG_DISABLE;
/* Device reports full duplex only. */
296 link_ksettings->base.duplex = DUPLEX_FULL;
/* ethtool .get_coalesce: report TX/RX non-adaptive moderation intervals
 * converted to microseconds (device units * intr_delay_resolution) and
 * whether adaptive RX moderation is enabled. Bails early when the device
 * lacks interrupt moderation (the error-return line is elided here).
 */
301 static int ena_get_coalesce(struct net_device *net_dev,
302 struct ethtool_coalesce *coalesce)
304 struct ena_adapter *adapter = netdev_priv(net_dev);
305 struct ena_com_dev *ena_dev = adapter->ena_dev;
307 if (!ena_com_interrupt_moderation_supported(ena_dev))
310 coalesce->tx_coalesce_usecs =
311 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
312 ena_dev->intr_delay_resolution;
314 coalesce->rx_coalesce_usecs =
315 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
316 * ena_dev->intr_delay_resolution;
318 coalesce->use_adaptive_rx_coalesce =
319 ena_com_get_adaptive_moderation_enabled(ena_dev);
/* Propagate the current non-adaptive TX moderation interval to every TX
 * ring's smoothed_interval so the datapath picks up the new value.
 */
324 static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
329 val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);
331 for (i = 0; i < adapter->num_io_queues; i++)
332 adapter->tx_ring[i].smoothed_interval = val;
/* RX counterpart of the TX propagator above: copy the current
 * non-adaptive RX moderation interval into every RX ring.
 */
335 static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
340 val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev);
342 for (i = 0; i < adapter->num_io_queues; i++)
343 adapter->rx_ring[i].smoothed_interval = val;
/* ethtool .set_coalesce: program the TX then RX non-adaptive moderation
 * intervals into the device, mirror each into the rings, and finally
 * toggle adaptive RX moderation to match the request (only on an actual
 * state change). NOTE(review): the early error-return lines after the
 * support check and each update call are elided in this view.
 */
346 static int ena_set_coalesce(struct net_device *net_dev,
347 struct ethtool_coalesce *coalesce)
349 struct ena_adapter *adapter = netdev_priv(net_dev);
350 struct ena_com_dev *ena_dev = adapter->ena_dev;
353 if (!ena_com_interrupt_moderation_supported(ena_dev))
356 rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
357 coalesce->tx_coalesce_usecs);
361 ena_update_tx_rings_nonadaptive_intr_moderation(adapter);
363 rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
364 coalesce->rx_coalesce_usecs);
368 ena_update_rx_rings_nonadaptive_intr_moderation(adapter);
370 if (coalesce->use_adaptive_rx_coalesce &&
371 !ena_com_get_adaptive_moderation_enabled(ena_dev))
372 ena_com_enable_adaptive_moderation(ena_dev);
374 if (!coalesce->use_adaptive_rx_coalesce &&
375 ena_com_get_adaptive_moderation_enabled(ena_dev))
376 ena_com_disable_adaptive_moderation(ena_dev);
/* ethtool .get_msglevel: return the driver's netif message-enable mask. */
381 static u32 ena_get_msglevel(struct net_device *netdev)
383 struct ena_adapter *adapter = netdev_priv(netdev);
385 return adapter->msg_enable;
/* ethtool .set_msglevel: store the requested netif message-enable mask. */
388 static void ena_set_msglevel(struct net_device *netdev, u32 value)
390 struct ena_adapter *adapter = netdev_priv(netdev);
392 adapter->msg_enable = value;
/* ethtool .get_drvinfo: report driver name and the PCI bus address of
 * the underlying device.
 */
395 static void ena_get_drvinfo(struct net_device *dev,
396 struct ethtool_drvinfo *info)
398 struct ena_adapter *adapter = netdev_priv(dev);
400 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
401 strlcpy(info->bus_info, pci_name(adapter->pdev),
402 sizeof(info->bus_info));
/* ethtool .get_ringparam: report max and current TX/RX ring sizes.
 * Queue 0 is representative since all rings share the same size.
 */
405 static void ena_get_ringparam(struct net_device *netdev,
406 struct ethtool_ringparam *ring)
408 struct ena_adapter *adapter = netdev_priv(netdev);
410 ring->tx_max_pending = adapter->max_tx_ring_size;
411 ring->rx_max_pending = adapter->max_rx_ring_size;
412 ring->tx_pending = adapter->tx_ring[0].ring_size;
413 ring->rx_pending = adapter->rx_ring[0].ring_size;
/* ethtool .set_ringparam: clamp the requested TX/RX sizes up to
 * ENA_MIN_RING_SIZE, round each down to a power of two (hardware
 * requirement), and apply via ena_update_queue_sizes(). A no-op request
 * returns early (the return statement after the equality check is
 * elided in this view).
 */
416 static int ena_set_ringparam(struct net_device *netdev,
417 struct ethtool_ringparam *ring)
419 struct ena_adapter *adapter = netdev_priv(netdev);
420 u32 new_tx_size, new_rx_size;
422 new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
423 ENA_MIN_RING_SIZE : ring->tx_pending;
424 new_tx_size = rounddown_pow_of_two(new_tx_size);
426 new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
427 ENA_MIN_RING_SIZE : ring->rx_pending;
428 new_rx_size = rounddown_pow_of_two(new_rx_size);
430 if (new_tx_size == adapter->requested_tx_ring_size &&
431 new_rx_size == adapter->requested_rx_ring_size)
434 return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
/* Translate device RSS hash-field bits (ENA_ADMIN_RSS_*) into the
 * ethtool RXH_* bitmask. NOTE(review): the RXH assignments for the
 * L2_DA/L3_DA/L3_SA branches are on lines elided from this view; only
 * the L4 port mappings are visible.
 */
437 static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
441 if (hash_fields & ENA_ADMIN_RSS_L2_DA)
444 if (hash_fields & ENA_ADMIN_RSS_L3_DA)
447 if (hash_fields & ENA_ADMIN_RSS_L3_SA)
450 if (hash_fields & ENA_ADMIN_RSS_L4_DP)
451 data |= RXH_L4_B_2_3;
453 if (hash_fields & ENA_ADMIN_RSS_L4_SP)
454 data |= RXH_L4_B_0_1;
/* Inverse of ena_flow_hash_to_flow_type(): translate ethtool RXH_* bits
 * into the device's ENA_ADMIN_RSS_* hash-field mask.
 */
459 static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
463 if (hash_fields & RXH_L2DA)
464 data |= ENA_ADMIN_RSS_L2_DA;
466 if (hash_fields & RXH_IP_DST)
467 data |= ENA_ADMIN_RSS_L3_DA;
469 if (hash_fields & RXH_IP_SRC)
470 data |= ENA_ADMIN_RSS_L3_SA;
472 if (hash_fields & RXH_L4_B_2_3)
473 data |= ENA_ADMIN_RSS_L4_DP;
475 if (hash_fields & RXH_L4_B_0_1)
476 data |= ENA_ADMIN_RSS_L4_SP;
/* Map the ethtool flow type in @cmd to an ENA_ADMIN_RSS_* protocol,
 * fetch that protocol's configured hash fields from the device, and
 * return them as RXH_* bits in cmd->data. NOTE(review): the switch's
 * `case`/`break` labels (TCP/UDP v4/v6, IP4/IP6, default) and error
 * returns are elided in this view; only the assignments remain.
 */
481 static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
482 struct ethtool_rxnfc *cmd)
484 enum ena_admin_flow_hash_proto proto;
490 switch (cmd->flow_type) {
492 proto = ENA_ADMIN_RSS_TCP4;
495 proto = ENA_ADMIN_RSS_UDP4;
498 proto = ENA_ADMIN_RSS_TCP6;
501 proto = ENA_ADMIN_RSS_UDP6;
504 proto = ENA_ADMIN_RSS_IP4;
507 proto = ENA_ADMIN_RSS_IP6;
510 proto = ENA_ADMIN_RSS_NOT_IP;
523 rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
527 cmd->data = ena_flow_hash_to_flow_type(hash_fields);
/* Counterpart of ena_get_rss_hash(): map the ethtool flow type to an
 * ENA_ADMIN_RSS_* protocol, convert cmd->data (RXH_* bits) to device
 * hash fields, and program them via ena_com_fill_hash_ctrl().
 * NOTE(review): the switch's `case`/`break` labels are elided here.
 */
532 static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
533 struct ethtool_rxnfc *cmd)
535 enum ena_admin_flow_hash_proto proto;
538 switch (cmd->flow_type) {
540 proto = ENA_ADMIN_RSS_TCP4;
543 proto = ENA_ADMIN_RSS_UDP4;
546 proto = ENA_ADMIN_RSS_TCP6;
549 proto = ENA_ADMIN_RSS_UDP6;
552 proto = ENA_ADMIN_RSS_IP4;
555 proto = ENA_ADMIN_RSS_IP6;
558 proto = ENA_ADMIN_RSS_NOT_IP;
571 hash_fields = ena_flow_data_to_flow_hash(cmd->data);
573 return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
/* ethtool .set_rxnfc: dispatch on info->cmd. Hash configuration is
 * delegated to ena_set_rss_hash(); classification-rule commands
 * (SRXCLSRLDEL/SRXCLSRLINS) are logged as unsupported. NOTE(review):
 * the `switch` opener, `break`s, and the final return are elided.
 */
576 static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
578 struct ena_adapter *adapter = netdev_priv(netdev);
583 rc = ena_set_rss_hash(adapter->ena_dev, info);
585 case ETHTOOL_SRXCLSRLDEL:
586 case ETHTOOL_SRXCLSRLINS:
588 netif_err(adapter, drv, netdev,
589 "Command parameter %d is not supported\n", info->cmd);
/* ethtool .get_rxnfc: GRXRINGS reports the IO queue count; hash queries
 * go to ena_get_rss_hash(); classification-rule queries are logged as
 * unsupported. NOTE(review): the `switch` opener, `break`s, and the
 * final return are elided in this view.
 */
596 static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
599 struct ena_adapter *adapter = netdev_priv(netdev);
603 case ETHTOOL_GRXRINGS:
604 info->data = adapter->num_io_queues;
608 rc = ena_get_rss_hash(adapter->ena_dev, info);
610 case ETHTOOL_GRXCLSRLCNT:
611 case ETHTOOL_GRXCLSRULE:
612 case ETHTOOL_GRXCLSRLALL:
614 netif_err(adapter, drv, netdev,
615 "Command parameter %d is not supported\n", info->cmd);
/* ethtool .get_rxfh_indir_size: RSS indirection table entry count. */
622 static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
624 return ENA_RX_RSS_TABLE_SIZE;
/* ethtool .get_rxfh_key_size: RSS hash key length in bytes. */
627 static u32 ena_get_rxfh_key_size(struct net_device *netdev)
629 return ENA_HASH_KEY_SIZE;
/* Program the RSS indirection table: translate each combined queue index
 * in @indir to the device's RX queue index (ENA_IO_RXQ_IDX) and fill the
 * entries, then commit with ena_com_indirect_table_set(). Device -EPERM
 * is reported to ethtool as -EOPNOTSUPP. NOTE(review): the error-return
 * inside the fill loop is elided in this view.
 */
632 static int ena_indirection_table_set(struct ena_adapter *adapter,
635 struct ena_com_dev *ena_dev = adapter->ena_dev;
638 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
639 rc = ena_com_indirect_table_fill_entry(ena_dev,
641 ENA_IO_RXQ_IDX(indir[i]));
643 netif_err(adapter, drv, adapter->netdev,
644 "Cannot fill indirect table (index is too large)\n");
649 rc = ena_com_indirect_table_set(ena_dev);
651 netif_err(adapter, drv, adapter->netdev,
652 "Cannot set indirect table\n");
653 return rc == -EPERM ? -EOPNOTSUPP : rc;
/* Read the RSS indirection table from the device into @indir and convert
 * each entry from the device's interleaved numbering back to a combined
 * queue index (see the retained comment below).
 */
658 static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
660 struct ena_com_dev *ena_dev = adapter->ena_dev;
666 rc = ena_com_indirect_table_get(ena_dev, indir);
670 /* Our internal representation of the indices is: even indices
671 * for Tx and uneven indices for Rx. We need to convert the Rx
672 * indices to be consecutive
674 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
675 indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
/* ethtool .get_rxfh: fill the indirection table, then -- if the device
 * supports hash-function queries -- the key, and translate the device's
 * hash function (Toeplitz / CRC32) to the ETH_RSS_HASH_* code.
 * NOTE(review): the -EOPNOTSUPP special-casing, `switch` opener,
 * `break`s, and final hfunc store/return are on elided lines.
 */
680 static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
683 struct ena_adapter *adapter = netdev_priv(netdev);
684 enum ena_admin_hash_functions ena_func;
688 rc = ena_indirection_table_get(adapter, indir);
692 /* We call this function in order to check if the device
693 * supports getting/setting the hash function.
695 rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func);
697 if (rc == -EOPNOTSUPP)
703 rc = ena_com_get_hash_key(adapter->ena_dev, key);
708 case ENA_ADMIN_TOEPLITZ:
709 func = ETH_RSS_HASH_TOP;
711 case ENA_ADMIN_CRC32:
712 func = ETH_RSS_HASH_CRC32;
715 netif_err(adapter, drv, netdev,
716 "Command parameter is not supported\n");
/* ethtool .set_rxfh: program the indirection table (when @indir given),
 * map @hfunc to a device hash function (NO_CHANGE keeps the current
 * one), and program function + key via ena_com_fill_hash_function().
 * Device -EPERM is reported as -EOPNOTSUPP. NOTE(review): the `switch`
 * opener, `break`s, the unsupported-hfunc return, the key-length
 * argument, and the final return are on elided lines.
 */
726 static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
727 const u8 *key, const u8 hfunc)
729 struct ena_adapter *adapter = netdev_priv(netdev);
730 struct ena_com_dev *ena_dev = adapter->ena_dev;
731 enum ena_admin_hash_functions func = 0;
735 rc = ena_indirection_table_set(adapter, indir);
741 case ETH_RSS_HASH_NO_CHANGE:
742 func = ena_com_get_current_hash_function(ena_dev);
744 case ETH_RSS_HASH_TOP:
745 func = ENA_ADMIN_TOEPLITZ;
747 case ETH_RSS_HASH_CRC32:
748 func = ENA_ADMIN_CRC32;
751 netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
757 rc = ena_com_fill_hash_function(ena_dev, func, key,
761 netif_err(adapter, drv, netdev, "Cannot fill key\n");
762 return rc == -EPERM ? -EOPNOTSUPP : rc;
/* ethtool .get_channels: report max and current combined (TX+RX paired)
 * channel counts; ENA uses combined channels only.
 */
769 static void ena_get_channels(struct net_device *netdev,
770 struct ethtool_channels *channels)
772 struct ena_adapter *adapter = netdev_priv(netdev);
774 channels->max_combined = adapter->max_num_io_queues;
775 channels->combined_count = adapter->num_io_queues;
/* ethtool .set_channels: validate the requested combined count against
 * the driver minimum and, when an XDP program is attached, against the
 * XDP queue-count constraint, then apply via ena_update_queue_count().
 * NOTE(review): the error-return for invalid counts is elided here.
 */
778 static int ena_set_channels(struct net_device *netdev,
779 struct ethtool_channels *channels)
781 struct ena_adapter *adapter = netdev_priv(netdev);
782 u32 count = channels->combined_count;
783 /* The check for max value is already done in ethtool */
784 if (count < ENA_MIN_NUM_IO_QUEUES ||
785 (ena_xdp_present(adapter) &&
786 !ena_xdp_legal_queue_count(adapter, channels->combined_count)))
789 return ena_update_queue_count(adapter, count);
/* ethtool .get_tunable: only ETHTOOL_RX_COPYBREAK is handled -- return
 * the current rx_copybreak threshold. NOTE(review): the `switch`
 * opener, default branch, and return lines are elided in this view.
 */
792 static int ena_get_tunable(struct net_device *netdev,
793 const struct ethtool_tunable *tuna, void *data)
795 struct ena_adapter *adapter = netdev_priv(netdev);
799 case ETHTOOL_RX_COPYBREAK:
800 *(u32 *)data = adapter->rx_copybreak;
/* ethtool .set_tunable: only ETHTOOL_RX_COPYBREAK is handled -- reject
 * lengths larger than the device MTU, otherwise store the new
 * rx_copybreak threshold. NOTE(review): the `switch` opener, the
 * rejection return, default branch, and final return are elided.
 */
810 static int ena_set_tunable(struct net_device *netdev,
811 const struct ethtool_tunable *tuna,
814 struct ena_adapter *adapter = netdev_priv(netdev);
819 case ETHTOOL_RX_COPYBREAK:
821 if (len > adapter->netdev->mtu) {
825 adapter->rx_copybreak = len;
/* ethtool callback table for the ENA netdev. supported_coalesce_params
 * advertises which ethtool -C fields ena_set_coalesce() honours
 * (plain usecs + adaptive RX).
 */
835 static const struct ethtool_ops ena_ethtool_ops = {
836 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
837 ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
838 .get_link_ksettings = ena_get_link_ksettings,
839 .get_drvinfo = ena_get_drvinfo,
840 .get_msglevel = ena_get_msglevel,
841 .set_msglevel = ena_set_msglevel,
842 .get_link = ethtool_op_get_link,
843 .get_coalesce = ena_get_coalesce,
844 .set_coalesce = ena_set_coalesce,
845 .get_ringparam = ena_get_ringparam,
846 .set_ringparam = ena_set_ringparam,
847 .get_sset_count = ena_get_sset_count,
848 .get_strings = ena_get_strings,
849 .get_ethtool_stats = ena_get_ethtool_stats,
850 .get_rxnfc = ena_get_rxnfc,
851 .set_rxnfc = ena_set_rxnfc,
852 .get_rxfh_indir_size = ena_get_rxfh_indir_size,
853 .get_rxfh_key_size = ena_get_rxfh_key_size,
854 .get_rxfh = ena_get_rxfh,
855 .set_rxfh = ena_set_rxfh,
856 .get_channels = ena_get_channels,
857 .set_channels = ena_set_channels,
858 .get_tunable = ena_get_tunable,
859 .set_tunable = ena_set_tunable,
860 .get_ts_info = ethtool_op_get_ts_info,
/* Install the ENA ethtool callback table on @netdev (called at probe). */
863 void ena_set_ethtool_ops(struct net_device *netdev)
865 netdev->ethtool_ops = &ena_ethtool_ops;
/* Gather all stat names and values through the same ethtool paths and
 * emit them either into @buf (when non-NULL, one snprintf'd line per
 * stat) or to the kernel log via netif_err(). Uses devm_kcalloc for the
 * temporary name/value arrays and frees both before returning.
 * NOTE(review): several lines (early returns after alloc failures, the
 * snprintf format string / value argument, buf advancement) are elided
 * in this view.
 */
868 static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
870 struct net_device *netdev = adapter->netdev;
876 strings_num = ena_get_sset_count(netdev, ETH_SS_STATS);
877 if (strings_num <= 0) {
878 netif_err(adapter, drv, netdev, "Can't get stats num\n");
882 strings_buf = devm_kcalloc(&adapter->pdev->dev,
883 ETH_GSTRING_LEN, strings_num,
886 netif_err(adapter, drv, netdev,
887 "failed to alloc strings_buf\n");
891 data_buf = devm_kcalloc(&adapter->pdev->dev,
892 strings_num, sizeof(u64),
895 netif_err(adapter, drv, netdev,
896 "failed to allocate data buf\n");
/* Second allocation failed: release the first before bailing out. */
897 devm_kfree(&adapter->pdev->dev, strings_buf);
901 ena_get_strings(netdev, ETH_SS_STATS, strings_buf);
902 ena_get_ethtool_stats(netdev, NULL, data_buf);
904 /* If there is a buffer, dump stats, otherwise print them to dmesg */
906 for (i = 0; i < strings_num; i++) {
907 rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64),
909 strings_buf + i * ETH_GSTRING_LEN,
914 for (i = 0; i < strings_num; i++)
915 netif_err(adapter, drv, netdev, "%s: %llu\n",
916 strings_buf + i * ETH_GSTRING_LEN,
919 devm_kfree(&adapter->pdev->dev, strings_buf);
920 devm_kfree(&adapter->pdev->dev, data_buf);
/* Public wrapper: dump all driver stats into the caller-supplied buffer.
 * NOTE(review): a guard against a NULL @buf appears to be on elided
 * lines -- confirm against the full source.
 */
923 void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
928 ena_dump_stats_ex(adapter, buf);
/* Public wrapper: NULL buffer selects the dmesg path in
 * ena_dump_stats_ex().
 */
931 void ena_dump_stats_to_dmesg(struct ena_adapter *adapter)
933 ena_dump_stats_ex(adapter, NULL);