/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__
#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
	(*(u64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
	(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
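
/* The MLX5E_READ_CTR* helpers fetch a single counter out of an opaque stats
 * buffer: dsc[i].offset is a byte offset into ptr, and the _BE variants
 * byte-swap device-endian (big-endian) values on the fly.  A minimal,
 * hypothetical use, with illustrative variable names only:
 *
 *	u64 sw_ctr = MLX5E_READ_CTR64_CPU(&sw_stats, sw_stats_desc, i);
 *	u64 hw_ctr = MLX5E_READ_CTR64_BE(query_out, vport_stats_desc, i);
 */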
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)

struct counter_desc {
	char		format[ETH_GSTRING_LEN];
	size_t		offset; /* Byte offset */
};
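
/* MLX5E_DECLARE_STAT() supplies both counter_desc initializers at once: the
 * stringified field name and its byte offset within the stats structure.
 * Shown purely for illustration,
 *
 *	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) }
 * expands to
 *	{ "rx_bytes", offsetof(struct mlx5e_sw_stats, rx_bytes) }
 *
 * The RX/TX variants prepend "rx%d_"/"tx%d_" so a per-queue index can be
 * substituted into the ethtool string later (e.g. via sprintf()).
 */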
struct mlx5e_sw_stats {
	u64 tx_tso_inner_packets;
	u64 tx_tso_inner_bytes;
	u64 rx_csum_unnecessary;
	u64 rx_csum_unnecessary_inner;
	u64 tx_csum_partial_inner;
	u64 rx_buff_alloc_err;
	u64 rx_cqe_compress_blks;
	u64 rx_cqe_compress_pkts;

	/* Special handling counters */
	u64 link_down_events_phy;
};

static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
struct mlx5e_qcounter_stats {
	u32 rx_out_of_buffer;
};

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
						vstats->query_vport_out, c)

struct mlx5e_vport_stats {
	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};
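
/* The vport group is the raw, big-endian output of the QUERY_VPORT_COUNTER
 * command; VPORT_COUNTER_GET() pulls a named 64-bit field straight from that
 * buffer.  Hypothetical usage (the "vstats" pointer is illustrative only):
 *
 *	u64 rx_uni_pkts = VPORT_COUNTER_GET(vstats,
 *					    received_eth_unicast.packets);
 */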
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_802_3_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
		   counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
	MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
		   counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO		8
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
#define PPORT_ETH_EXT_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
		   counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
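
/* PPCNT counters are laid out as {name}_high/{name}_low 32-bit pairs; the
 * "##_high" token pasting above points MLX5_BYTE_OFF()/MLX5_GET64() at the
 * high word, from which the full 64-bit value is read.  Hedged example, with
 * illustrative local names:
 *
 *	u64 fcs_err  = PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
 *	u64 p0_bytes = PPORT_PER_PRIO_GET(pstats, 0, rx_octets);
 */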
struct mlx5e_pport_stats {
	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};

static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
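
/* As the comment above notes, the "%s" placeholder is resolved at
 * string-generation time with either "global" or a per-priority label such
 * as "prio3", depending on whether global pause or PFC is being reported.
 * Illustrative, hypothetical snippet:
 *
 *	sprintf(name, pport_per_prio_pfc_stats_desc[i].format, "prio3");
 */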
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
#define PCIE_PERF_GET(pcie_stats, c) \
	MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		 counter_set.pcie_perf_cntrs_grp_data_layout.c)

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
#define PCIE_PERF_GET64(pcie_stats, c) \
	MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		   counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
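
/* PCIe performance counters come in two widths: PCIE_PERF_GET() reads a
 * 32-bit field directly, while PCIE_PERF_GET64() uses the same _high/_low
 * convention as the PPCNT groups above.  Hypothetical usage, with
 * illustrative local names:
 *
 *	u32 rx_err  = PCIE_PERF_GET(pcie_stats, rx_errors);
 *	u64 tx_ovfl = PCIE_PERF_GET64(pcie_stats, tx_overflow_buffer_pkt);
 */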
struct mlx5e_pcie_stats {
	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};

static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

struct mlx5e_rq_stats {
	u64 csum_unnecessary_inner;
	u64 cqe_compress_blks;
	u64 cqe_compress_pkts;
};

static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
};

struct mlx5e_sq_stats {
	/* commonly accessed in data path */
	u64 tso_inner_packets;
	u64 csum_partial_inner;
	/* less likely accessed in data path */
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};
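
/* Per-queue descriptors keep the "%d" placeholder in their format string; at
 * ethtool-strings time it is resolved with the RX channel index or the TX
 * queue index.  Illustrative, hypothetical snippet (variable names are not
 * from the driver):
 *
 *	sprintf(name, rq_stats_desc[j].format, channel_ix);
 *	sprintf(name, sq_stats_desc[j].format, txq_ix);
 */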
#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \
	(ARRAY_SIZE(pport_phy_statistical_stats_desc) * \
	 MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
#define NUM_PCIE_PERF_COUNTERS(priv) \
	(ARRAY_SIZE(pcie_perf_stats_desc) * \
	 MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
#define NUM_PCIE_PERF_COUNTERS64(priv) \
	(ARRAY_SIZE(pcie_perf_stats_desc64) * \
	 MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
#define NUM_PCIE_PERF_STALL_COUNTERS(priv) \
	(ARRAY_SIZE(pcie_perf_stall_stats_desc) * \
	 MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
	ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_ETH_EXT_COUNTERS(priv) \
	(ARRAY_SIZE(pport_eth_ext_stats_desc) * \
	 MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
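
/* Groups that depend on firmware support multiply their descriptor count by
 * the relevant PCAM/MCAM capability bit, so an unsupported group contributes
 * zero ethtool entries.  For example, on a device without the statistical
 * PHY group, NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) evaluates to 0 and its
 * strings are simply not exported.
 */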
#define NUM_PPORT_COUNTERS(priv)	(NUM_PPORT_802_3_COUNTERS + \
					 NUM_PPORT_2863_COUNTERS  + \
					 NUM_PPORT_2819_COUNTERS  + \
					 NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
					 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
					 NUM_PPORT_PRIO + \
					 NUM_PPORT_ETH_EXT_COUNTERS(priv))
#define NUM_PCIE_COUNTERS(priv)		(NUM_PCIE_PERF_COUNTERS(priv) + \
					 NUM_PCIE_PERF_COUNTERS64(priv) + \
					 NUM_PCIE_PERF_STALL_COUNTERS(priv))
#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
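
/* Illustrative sketch only, not part of the upstream driver API: shows how
 * the descriptor tables and accessor macros above compose.  The helper name
 * and its parameters are hypothetical; the real driver performs this work in
 * its ethtool get_strings/get_ethtool_stats callbacks.
 */
static inline int mlx5e_example_read_sw_counters(const struct mlx5e_sw_stats *sw,
						 u64 *data)
{
	int i;

	/* One u64 per descriptor, read at the byte offset that
	 * MLX5E_DECLARE_STAT() recorded when sw_stats_desc[] was built.
	 */
	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(sw, sw_stats_desc, i);

	return i;
}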
struct mlx5e_stats {
	struct mlx5e_sw_stats sw;
	struct mlx5e_qcounter_stats qcnt;
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
	struct rtnl_link_stats64 vf_vport;
	struct mlx5e_pcie_stats pcie;
};

static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", 8 },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", 16 },	/* bus stuck (I2C or data shorted) */
	{ "module_high_temp", 48 },	/* high temperature */
	{ "module_bad_shorted", 56 },	/* bad or shorted cable/module */
};

#endif /* __MLX5_EN_STATS_H__ */