/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "en_accel/tls.h"
#include "en_accel/en_accel.h"
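
/*
 * Stats machinery overview: each netdev profile publishes an array of
 * stats groups (priv->profile->stats_grps) plus a stats_grps_num()
 * callback.  Every group reports how many counters it owns
 * (get_num_stats), emits their ethtool string names (fill_strings),
 * copies their current values (fill_stats) and, optionally, refreshes
 * them from firmware/hardware (update_stats).  The helpers below
 * simply iterate that array.
 */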
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i, idx = 0;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_strings(priv, data, idx);
}
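
/*
 * Rough usage sketch of the helpers above, as seen from the ethtool
 * callbacks (the exact call sites live in en_ethtool.c and may differ
 * slightly between kernel versions):
 *
 *	count = mlx5e_stats_total_num(priv);	// .get_sset_count(ETH_SS_STATS)
 *	mlx5e_stats_fill_strings(priv, data);	// .get_strings()
 *	mlx5e_stats_update(priv);		// refresh counters from HW
 *	mlx5e_stats_fill(priv, data, 0);	// .get_ethtool_stats()
 */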
102 /* Concrete NIC Stats */
104 static const struct counter_desc sw_stats_desc[] = {
105 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
106 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
107 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
108 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
109 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
110 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
111 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
112 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
113 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
114 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
115 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
116 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif
130 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
131 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
132 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
133 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
134 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
135 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
136 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
137 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
138 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
139 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
140 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
141 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
142 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
143 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
144 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
145 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
146 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
147 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
148 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
149 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
150 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
151 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
152 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
153 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
154 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
155 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
156 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
157 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
158 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
159 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
160 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
161 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
162 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
163 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
164 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
165 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
166 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
167 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
168 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
169 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
170 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
171 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
172 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
173 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
174 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
175 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
176 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
177 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
178 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
179 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
180 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
181 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
182 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
183 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
184 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
185 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
198 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
199 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
200 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
201 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
202 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
203 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
204 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
205 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
206 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
207 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
208 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
209 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
210 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
211 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
212 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
213 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
214 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
215 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
216 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
217 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
218 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
219 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
220 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
221 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
222 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
223 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
224 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
225 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
226 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
227 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
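
/*
 * Note: MLX5E_DECLARE_STAT (see en_stats.h) is assumed to record the
 * field name as the ethtool format string together with its offset
 * inside struct mlx5e_sw_stats; that is what lets the generic
 * MLX5E_READ_CTR64_CPU() accessor fetch each counter by descriptor
 * index in the fill_stats callback below.
 */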
233 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
235 return NUM_SW_COUNTERS;
238 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
242 for (i = 0; i < NUM_SW_COUNTERS; i++)
243 strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
247 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
251 for (i = 0; i < NUM_SW_COUNTERS; i++)
252 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
256 static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
257 struct mlx5e_xdpsq_stats *xdpsq_red_stats)
259 s->tx_xdp_xmit += xdpsq_red_stats->xmit;
260 s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
261 s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
262 s->tx_xdp_nops += xdpsq_red_stats->nops;
263 s->tx_xdp_full += xdpsq_red_stats->full;
264 s->tx_xdp_err += xdpsq_red_stats->err;
265 s->tx_xdp_cqes += xdpsq_red_stats->cqes;
268 static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
269 struct mlx5e_xdpsq_stats *xdpsq_stats)
271 s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
272 s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
273 s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
274 s->rx_xdp_tx_nops += xdpsq_stats->nops;
275 s->rx_xdp_tx_full += xdpsq_stats->full;
276 s->rx_xdp_tx_err += xdpsq_stats->err;
277 s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
280 static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
281 struct mlx5e_xdpsq_stats *xsksq_stats)
283 s->tx_xsk_xmit += xsksq_stats->xmit;
284 s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
285 s->tx_xsk_inlnw += xsksq_stats->inlnw;
286 s->tx_xsk_full += xsksq_stats->full;
287 s->tx_xsk_err += xsksq_stats->err;
288 s->tx_xsk_cqes += xsksq_stats->cqes;
291 static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
292 struct mlx5e_rq_stats *xskrq_stats)
294 s->rx_xsk_packets += xskrq_stats->packets;
295 s->rx_xsk_bytes += xskrq_stats->bytes;
296 s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
297 s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
298 s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
299 s->rx_xsk_csum_none += xskrq_stats->csum_none;
300 s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
301 s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
302 s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
303 s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
304 s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
305 s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
306 s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
307 s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
308 s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
309 s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
310 s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
311 s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
312 s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
315 static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
316 struct mlx5e_rq_stats *rq_stats)
318 s->rx_packets += rq_stats->packets;
319 s->rx_bytes += rq_stats->bytes;
320 s->rx_lro_packets += rq_stats->lro_packets;
321 s->rx_lro_bytes += rq_stats->lro_bytes;
322 s->rx_gro_packets += rq_stats->gro_packets;
323 s->rx_gro_bytes += rq_stats->gro_bytes;
324 s->rx_gro_skbs += rq_stats->gro_skbs;
325 s->rx_gro_match_packets += rq_stats->gro_match_packets;
326 s->rx_gro_large_hds += rq_stats->gro_large_hds;
327 s->rx_ecn_mark += rq_stats->ecn_mark;
328 s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
329 s->rx_csum_none += rq_stats->csum_none;
330 s->rx_csum_complete += rq_stats->csum_complete;
331 s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
332 s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
333 s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
334 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
335 s->rx_xdp_drop += rq_stats->xdp_drop;
336 s->rx_xdp_redirect += rq_stats->xdp_redirect;
337 s->rx_wqe_err += rq_stats->wqe_err;
338 s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
339 s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
340 s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
341 s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
342 s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
343 s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
344 s->rx_cache_reuse += rq_stats->cache_reuse;
345 s->rx_cache_full += rq_stats->cache_full;
346 s->rx_cache_empty += rq_stats->cache_empty;
347 s->rx_cache_busy += rq_stats->cache_busy;
348 s->rx_cache_waive += rq_stats->cache_waive;
349 s->rx_congst_umr += rq_stats->congst_umr;
350 s->rx_arfs_err += rq_stats->arfs_err;
351 s->rx_recover += rq_stats->recover;
352 #ifdef CONFIG_MLX5_EN_TLS
353 s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
354 s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
355 s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
356 s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
357 s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
358 s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
359 s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
360 s->rx_tls_resync_res_retry += rq_stats->tls_resync_res_retry;
361 s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
362 s->rx_tls_err += rq_stats->tls_err;
366 static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
367 struct mlx5e_ch_stats *ch_stats)
369 s->ch_events += ch_stats->events;
370 s->ch_poll += ch_stats->poll;
371 s->ch_arm += ch_stats->arm;
372 s->ch_aff_change += ch_stats->aff_change;
373 s->ch_force_irq += ch_stats->force_irq;
374 s->ch_eq_rearm += ch_stats->eq_rearm;
377 static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
378 struct mlx5e_sq_stats *sq_stats)
380 s->tx_packets += sq_stats->packets;
381 s->tx_bytes += sq_stats->bytes;
382 s->tx_tso_packets += sq_stats->tso_packets;
383 s->tx_tso_bytes += sq_stats->tso_bytes;
384 s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
385 s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
386 s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
387 s->tx_nop += sq_stats->nop;
388 s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
389 s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
390 s->tx_queue_stopped += sq_stats->stopped;
391 s->tx_queue_wake += sq_stats->wake;
392 s->tx_queue_dropped += sq_stats->dropped;
393 s->tx_cqe_err += sq_stats->cqe_err;
394 s->tx_recover += sq_stats->recover;
395 s->tx_xmit_more += sq_stats->xmit_more;
396 s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
397 s->tx_csum_none += sq_stats->csum_none;
398 s->tx_csum_partial += sq_stats->csum_partial;
399 #ifdef CONFIG_MLX5_EN_TLS
400 s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
401 s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
402 s->tx_tls_ooo += sq_stats->tls_ooo;
403 s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
404 s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
405 s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
406 s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
407 s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
408 s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
410 s->tx_cqes += sq_stats->cqes;
static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

	if (priv->tx_ptp_opened) {
		for (i = 0; i < priv->max_opened_tc; i++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	if (priv->rx_ptp_opened) {
		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}
static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}
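
/*
 * The acquire load of max_qos_sqs above pairs with the release store
 * performed when a QoS SQ is opened, so every index below max_qos_sqs
 * is guaranteed to observe a fully published qos_sq_stats[] pointer;
 * that is why READ_ONCE(stats[i]) cannot see a stale or NULL entry.
 */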
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			priv->channel_stats[i];
		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)
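
/*
 * The two queue counters cover different RQs: rx_out_of_buffer is read
 * from the regular queue counter and counts packets dropped because no
 * receive WQE was posted in time, while rx_if_down_packets comes from
 * the counter attached to the "drop RQ" that absorbs traffic arriving
 * while the interface is down.
 */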
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}
514 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
518 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
519 strcpy(data + (idx++) * ETH_GSTRING_LEN,
520 q_stats_desc[i].format);
522 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
523 strcpy(data + (idx++) * ETH_GSTRING_LEN,
524 drop_rq_stats_desc[i].format);
529 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
533 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
534 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
536 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
537 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
538 drop_rq_stats_desc, i);
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}
570 #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
571 static const struct counter_desc vnic_env_stats_steer_desc[] = {
572 { "rx_steer_missed_packets",
573 VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
576 static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
577 { "dev_internal_queue_oob",
578 VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
581 #define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
582 (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
583 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
584 #define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
585 (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
586 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
588 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
590 return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
591 NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
594 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
598 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
599 strcpy(data + (idx++) * ETH_GSTRING_LEN,
600 vnic_env_stats_steer_desc[i].format);
602 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
603 strcpy(data + (idx++) * ETH_GSTRING_LEN,
604 vnic_env_stats_dev_oob_desc[i].format);
608 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
612 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
613 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
614 vnic_env_stats_steer_desc, i);
616 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
617 data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
618 vnic_env_stats_dev_oob_desc, i);
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}
635 #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
636 static const struct counter_desc vport_stats_desc[] = {
637 { "rx_vport_unicast_packets",
638 VPORT_COUNTER_OFF(received_eth_unicast.packets) },
639 { "rx_vport_unicast_bytes",
640 VPORT_COUNTER_OFF(received_eth_unicast.octets) },
641 { "tx_vport_unicast_packets",
642 VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
643 { "tx_vport_unicast_bytes",
644 VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
645 { "rx_vport_multicast_packets",
646 VPORT_COUNTER_OFF(received_eth_multicast.packets) },
647 { "rx_vport_multicast_bytes",
648 VPORT_COUNTER_OFF(received_eth_multicast.octets) },
649 { "tx_vport_multicast_packets",
650 VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
651 { "tx_vport_multicast_bytes",
652 VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
653 { "rx_vport_broadcast_packets",
654 VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
655 { "rx_vport_broadcast_bytes",
656 VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
657 { "tx_vport_broadcast_packets",
658 VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
659 { "tx_vport_broadcast_bytes",
660 VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
661 { "rx_vport_rdma_unicast_packets",
662 VPORT_COUNTER_OFF(received_ib_unicast.packets) },
663 { "rx_vport_rdma_unicast_bytes",
664 VPORT_COUNTER_OFF(received_ib_unicast.octets) },
665 { "tx_vport_rdma_unicast_packets",
666 VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
667 { "tx_vport_rdma_unicast_bytes",
668 VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
669 { "rx_vport_rdma_multicast_packets",
670 VPORT_COUNTER_OFF(received_ib_multicast.packets) },
671 { "rx_vport_rdma_multicast_bytes",
672 VPORT_COUNTER_OFF(received_ib_multicast.octets) },
673 { "tx_vport_rdma_multicast_packets",
674 VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
675 { "tx_vport_rdma_multicast_bytes",
676 VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
679 #define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
681 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
683 return NUM_VPORT_COUNTERS;
686 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
690 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
691 strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
695 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
699 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
700 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
701 vport_stats_desc, i);
705 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
707 u32 *out = (u32 *)priv->stats.vport.query_vport_out;
708 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
709 struct mlx5_core_dev *mdev = priv->mdev;
711 MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
712 mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
715 #define PPORT_802_3_OFF(c) \
716 MLX5_BYTE_OFF(ppcnt_reg, \
717 counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
718 static const struct counter_desc pport_802_3_stats_desc[] = {
719 { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
720 { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
721 { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
722 { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
723 { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
724 { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
725 { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
726 { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
727 { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
728 { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
729 { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
730 { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
731 { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
732 { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
733 { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
734 { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
735 { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
736 { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
739 #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
741 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
743 return NUM_PPORT_802_3_COUNTERS;
746 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
750 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
751 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
755 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
759 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
760 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
761 pport_802_3_stats_desc, i);
765 #define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
766 (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
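
/*
 * PPCNT support check: on devices that expose the ports capability
 * access register map (pcam_reg), trust the explicit ppcnt bit;
 * otherwise assume the basic PPCNT counter groups are available.
 */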
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
785 #define MLX5E_READ_CTR64_BE_F(ptr, set, c) \
786 be64_to_cpu(*(__be64 *)((char *)ptr + \
787 MLX5_BYTE_OFF(ppcnt_reg, \
788 counter_set.set.c##_high)))
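
/*
 * PPCNT 64-bit counters are laid out as <name>_high/<name>_low 32-bit
 * big-endian halves; the descriptors store the offset of the _high
 * half, so reading 8 bytes from that offset and byte-swapping with
 * be64_to_cpu() yields the full 64-bit value.
 */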
static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}
805 void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
806 struct ethtool_pause_stats *pause_stats)
808 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
809 struct mlx5_core_dev *mdev = priv->mdev;
811 if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
814 pause_stats->tx_pause_frames =
815 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
816 eth_802_3_cntrs_grp_data_layout,
817 a_pause_mac_ctrl_frames_transmitted);
818 pause_stats->rx_pause_frames =
819 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
820 eth_802_3_cntrs_grp_data_layout,
821 a_pause_mac_ctrl_frames_received);
824 void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
825 struct ethtool_eth_phy_stats *phy_stats)
827 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
828 struct mlx5_core_dev *mdev = priv->mdev;
830 if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
833 phy_stats->SymbolErrorDuringCarrier =
834 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
835 eth_802_3_cntrs_grp_data_layout,
836 a_symbol_error_during_carrier);
839 void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
840 struct ethtool_eth_mac_stats *mac_stats)
842 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
843 struct mlx5_core_dev *mdev = priv->mdev;
	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

#define RD(name)							\
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,				\
			      eth_802_3_cntrs_grp_data_layout,		\
			      name)

853 mac_stats->FramesTransmittedOK = RD(a_frames_transmitted_ok);
854 mac_stats->FramesReceivedOK = RD(a_frames_received_ok);
855 mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
856 mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok);
857 mac_stats->OctetsReceivedOK = RD(a_octets_received_ok);
858 mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
859 mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
860 mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
861 mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
862 mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors);
863 mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors);
#undef RD
}
868 void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
869 struct ethtool_eth_ctrl_stats *ctrl_stats)
871 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
872 struct mlx5_core_dev *mdev = priv->mdev;
874 if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
877 ctrl_stats->MACControlFramesTransmitted =
878 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
879 eth_802_3_cntrs_grp_data_layout,
880 a_mac_control_frames_transmitted);
881 ctrl_stats->MACControlFramesReceived =
882 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
883 eth_802_3_cntrs_grp_data_layout,
884 a_mac_control_frames_received);
885 ctrl_stats->UnsupportedOpcodesReceived =
886 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
887 eth_802_3_cntrs_grp_data_layout,
888 a_unsupported_opcodes_received);
891 #define PPORT_2863_OFF(c) \
892 MLX5_BYTE_OFF(ppcnt_reg, \
893 counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
894 static const struct counter_desc pport_2863_stats_desc[] = {
895 { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
896 { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
897 { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
900 #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
902 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
904 return NUM_PPORT_2863_COUNTERS;
907 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
911 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
912 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
916 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
920 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
921 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
922 pport_2863_stats_desc, i);
926 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
928 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
929 struct mlx5_core_dev *mdev = priv->mdev;
930 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
931 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
934 MLX5_SET(ppcnt_reg, in, local_port, 1);
935 out = pstats->RFC_2863_counters;
936 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
937 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
940 #define PPORT_2819_OFF(c) \
941 MLX5_BYTE_OFF(ppcnt_reg, \
942 counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
943 static const struct counter_desc pport_2819_stats_desc[] = {
944 { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
945 { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
946 { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
947 { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
948 { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
949 { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
950 { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
951 { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
952 { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
953 { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
954 { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
955 { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
956 { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
959 #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
961 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
963 return NUM_PPORT_2819_COUNTERS;
966 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
970 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
971 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
975 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
979 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
980 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
981 pport_2819_stats_desc, i);
985 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
987 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
988 struct mlx5_core_dev *mdev = priv->mdev;
989 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
990 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
993 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
996 MLX5_SET(ppcnt_reg, in, local_port, 1);
997 out = pstats->RFC_2819_counters;
998 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
999 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  8191 },
	{ 8192, 10239 },
	{}
};
1016 void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
1017 struct ethtool_rmon_stats *rmon,
1018 const struct ethtool_rmon_hist_range **ranges)
1020 u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
1021 struct mlx5_core_dev *mdev = priv->mdev;
1022 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1023 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1025 MLX5_SET(ppcnt_reg, in, local_port, 1);
1026 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

#define RD(name)						\
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,		\
			      eth_2819_cntrs_grp_data_layout,	\
			      name)

1036 rmon->undersize_pkts = RD(ether_stats_undersize_pkts);
1037 rmon->fragments = RD(ether_stats_fragments);
1038 rmon->jabbers = RD(ether_stats_jabbers);
1040 rmon->hist[0] = RD(ether_stats_pkts64octets);
1041 rmon->hist[1] = RD(ether_stats_pkts65to127octets);
1042 rmon->hist[2] = RD(ether_stats_pkts128to255octets);
1043 rmon->hist[3] = RD(ether_stats_pkts256to511octets);
1044 rmon->hist[4] = RD(ether_stats_pkts512to1023octets);
1045 rmon->hist[5] = RD(ether_stats_pkts1024to1518octets);
1046 rmon->hist[6] = RD(ether_stats_pkts1519to2047octets);
1047 rmon->hist[7] = RD(ether_stats_pkts2048to4095octets);
1048 rmon->hist[8] = RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9] = RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}
1055 #define PPORT_PHY_STATISTICAL_OFF(c) \
1056 MLX5_BYTE_OFF(ppcnt_reg, \
1057 counter_set.phys_layer_statistical_cntrs.c##_high)
1058 static const struct counter_desc pport_phy_statistical_stats_desc[] = {
1059 { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
1060 { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
1063 static const struct counter_desc
1064 pport_phy_statistical_err_lanes_stats_desc[] = {
1065 { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
1066 { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
1067 { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
1068 { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
1071 #define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
1072 ARRAY_SIZE(pport_phy_statistical_stats_desc)
1073 #define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
1074 ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
1076 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
1078 struct mlx5_core_dev *mdev = priv->mdev;
1081 /* "1" for link_down_events special counter */
1084 num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
1085 NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
1087 num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
1088 NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
1093 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
1095 struct mlx5_core_dev *mdev = priv->mdev;
1098 strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
1100 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1103 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1104 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1105 pport_phy_statistical_stats_desc[i].format);
1107 if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1108 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1109 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1110 pport_phy_statistical_err_lanes_stats_desc[i].format);
1115 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
1117 struct mlx5_core_dev *mdev = priv->mdev;
1120 /* link_down_events_phy has special handling since it is not stored in __be64 format */
1121 data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
1122 counter_set.phys_layer_cntrs.link_down_events);
1124 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1127 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1129 MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
1130 pport_phy_statistical_stats_desc, i);
1132 if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1133 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1135 MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
1136 pport_phy_statistical_err_lanes_stats_desc,
1141 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
1143 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1144 struct mlx5_core_dev *mdev = priv->mdev;
1145 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1146 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1149 MLX5_SET(ppcnt_reg, in, local_port, 1);
1150 out = pstats->phy_counters;
1151 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1152 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1154 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1157 out = pstats->phy_statistical_counters;
1158 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1159 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
static int fec_num_lanes(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	int err;

	MLX5_SET(pmlp_reg, in, local_port, 1);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_PMLP, 0, 0);
	if (err)
		return 0;

	return MLX5_GET(pmlp_reg, out, width);
}
static int fec_active_mode(struct mlx5_core_dev *mdev)
{
	unsigned long fec_active_long;
	u32 fec_active;

	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
		return MLX5E_FEC_NOFEC;

	fec_active_long = fec_active;
	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
}
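
/*
 * fec_active is expected to be a bitmask with the currently active FEC
 * mode set; find_first_bit() turns it into the MLX5E_FEC_* bit index
 * that the switch in fec_set_block_stats() below matches on.  An empty
 * mask yields an out-of-range index and simply matches no case.
 */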
#define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
	fec_stats->corrected_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_corrected_blocks_lane##idx); \
	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_uncorrectable_blocks_lane##idx); \
})
static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
			     u32 *ppcnt, u8 lanes)
{
	if (lanes > 3) { /* 4 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(3);
		MLX5E_STATS_SET_FEC_BLOCK(2);
	}
	if (lanes > 1) /* 2 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(1);
	if (lanes > 0) /* 1 lane */
		MLX5E_STATS_SET_FEC_BLOCK(0);
}
1211 static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
1213 fec_stats->corrected_blocks.total =
1214 MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1215 rs_fec_corrected_blocks);
1216 fec_stats->uncorrectable_blocks.total =
1217 MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1218 rs_fec_uncorrectable_blocks);
static void fec_set_block_stats(struct mlx5e_priv *priv,
				struct ethtool_fec_stats *fec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int mode = fec_active_mode(mdev);

	if (mode == MLX5E_FEC_NOFEC)
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
		return;

	switch (mode) {
	case MLX5E_FEC_RS_528_514:
	case MLX5E_FEC_RS_544_514:
	case MLX5E_FEC_LLRS_272_257_1:
		fec_set_rs_stats(fec_stats, out);
		return;
	case MLX5E_FEC_FIRECODE:
		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
	}
}
1249 static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
1250 struct ethtool_fec_stats *fec_stats)
1252 u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
1253 struct mlx5_core_dev *mdev = priv->mdev;
1254 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1255 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1257 MLX5_SET(ppcnt_reg, in, local_port, 1);
1258 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1259 if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
1260 sz, MLX5_REG_PPCNT, 0, 0))
1263 fec_stats->corrected_bits.total =
1264 MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
1265 phys_layer_statistical_cntrs,
1266 phy_corrected_bits);
1269 void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
1270 struct ethtool_fec_stats *fec_stats)
1272 if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
1275 fec_set_corrected_bits_total(priv, fec_stats);
1276 fec_set_block_stats(priv, fec_stats);
1279 #define PPORT_ETH_EXT_OFF(c) \
1280 MLX5_BYTE_OFF(ppcnt_reg, \
1281 counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
1282 static const struct counter_desc pport_eth_ext_stats_desc[] = {
1283 { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
1286 #define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
1288 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
1290 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1291 return NUM_PPORT_ETH_EXT_COUNTERS;
1296 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
1300 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1301 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1302 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1303 pport_eth_ext_stats_desc[i].format);
1307 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
1311 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1312 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1314 MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
1315 pport_eth_ext_stats_desc, i);
1319 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
1321 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1322 struct mlx5_core_dev *mdev = priv->mdev;
1323 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1324 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1327 if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
1330 MLX5_SET(ppcnt_reg, in, local_port, 1);
1331 out = pstats->eth_ext_counters;
1332 MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
1333 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1336 #define PCIE_PERF_OFF(c) \
1337 MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
1338 static const struct counter_desc pcie_perf_stats_desc[] = {
1339 { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
1340 { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
1343 #define PCIE_PERF_OFF64(c) \
1344 MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
1345 static const struct counter_desc pcie_perf_stats_desc64[] = {
1346 { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
1349 static const struct counter_desc pcie_perf_stall_stats_desc[] = {
1350 { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
1351 { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
1352 { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
1353 { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
1356 #define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
1357 #define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
1358 #define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
1360 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
1364 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1365 num_stats += NUM_PCIE_PERF_COUNTERS;
1367 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1368 num_stats += NUM_PCIE_PERF_COUNTERS64;
1370 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1371 num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
1376 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
1380 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1381 for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1382 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1383 pcie_perf_stats_desc[i].format);
1385 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1386 for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1387 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1388 pcie_perf_stats_desc64[i].format);
1390 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1391 for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1392 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1393 pcie_perf_stall_stats_desc[i].format);
1397 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
1401 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1402 for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1404 MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1405 pcie_perf_stats_desc, i);
1407 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1408 for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1410 MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
1411 pcie_perf_stats_desc64, i);
1413 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1414 for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1416 MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1417 pcie_perf_stall_stats_desc, i);
1421 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
1423 struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
1424 struct mlx5_core_dev *mdev = priv->mdev;
1425 u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
1426 int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
1429 if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
1432 out = pcie_stats->pcie_perf_counters;
1433 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
1434 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
1437 #define PPORT_PER_TC_PRIO_OFF(c) \
1438 MLX5_BYTE_OFF(ppcnt_reg, \
1439 counter_set.eth_per_tc_prio_grp_data_layout.c##_high)
1441 static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
1442 { "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
1445 #define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc)
1447 #define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
1448 MLX5_BYTE_OFF(ppcnt_reg, \
1449 counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)
1451 static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
1452 { "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
1453 { "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
1456 #define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
1457 ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1459 static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
1461 struct mlx5_core_dev *mdev = priv->mdev;
1463 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1466 return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
1469 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1471 struct mlx5_core_dev *mdev = priv->mdev;
1474 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1477 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1478 for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1479 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1480 pport_per_tc_prio_stats_desc[i].format, prio);
1481 for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1482 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1483 pport_per_tc_congest_prio_stats_desc[i].format, prio);
1489 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1491 struct mlx5e_pport_stats *pport = &priv->stats.pport;
1492 struct mlx5_core_dev *mdev = priv->mdev;
1495 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1498 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1499 for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1501 MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
1502 pport_per_tc_prio_stats_desc, i);
1503 for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
1505 MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
1506 pport_per_tc_congest_prio_stats_desc, i);
1512 static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
1514 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1515 struct mlx5_core_dev *mdev = priv->mdev;
1516 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1517 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1521 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1524 MLX5_SET(ppcnt_reg, in, pnat, 2);
1525 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
1526 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1527 out = pstats->per_tc_prio_counters[prio];
1528 MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1529 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1533 static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1535 struct mlx5_core_dev *mdev = priv->mdev;
1537 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1540 return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
1543 static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
1545 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1546 struct mlx5_core_dev *mdev = priv->mdev;
1547 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1548 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1552 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1555 MLX5_SET(ppcnt_reg, in, pnat, 2);
1556 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
1557 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1558 out = pstats->per_tc_congest_prio_counters[prio];
1559 MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1560 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1564 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
1566 return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
1567 mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
1570 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
1572 mlx5e_grp_per_tc_prio_update_stats(priv);
1573 mlx5e_grp_per_tc_congest_prio_update_stats(priv);
1576 #define PPORT_PER_PRIO_OFF(c) \
1577 MLX5_BYTE_OFF(ppcnt_reg, \
1578 counter_set.eth_per_prio_grp_data_layout.c##_high)
1579 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
1580 { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
1581 { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
1582 { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
1583 { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
1584 { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
1587 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
1589 static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
1591 return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
1594 static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
1600 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1601 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1602 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1603 pport_per_prio_traffic_stats_desc[i].format, prio);
1609 static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
1615 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1616 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1618 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1619 pport_per_prio_traffic_stats_desc, i);
1625 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
1626 /* %s is "global" or "prio{i}" */
1627 { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
1628 { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
1629 { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
1630 { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
1631 { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
1634 static const struct counter_desc pport_pfc_stall_stats_desc[] = {
1635 { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
1636 { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};
1639 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
1640 #define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
1641 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
1642 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
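/* Bitmap of priorities with PFC enabled in either direction; returns 0 on
 * query failure or for non-Ethernet ports.
 */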
1644 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
1646 struct mlx5_core_dev *mdev = priv->mdev;
u8 pfc_en_tx;
u8 pfc_en_rx;
int err;
1651 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return 0;
1654 err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1656 return err ? 0 : pfc_en_tx | pfc_en_rx;
}
1659 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
1661 struct mlx5_core_dev *mdev = priv->mdev;
u32 rx_pause;
u32 tx_pause;
int err;
1666 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return false;
1669 err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1671 return err ? false : rx_pause | tx_pause;
}
1674 static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
1676 return (mlx5e_query_global_pause_combined(priv) +
1677 hweight8(mlx5e_query_pfc_combined(priv))) *
1678 NUM_PPORT_PER_PRIO_PFC_COUNTERS +
1679 NUM_PPORT_PFC_STALL_COUNTERS(priv);
}
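/* Example of the resulting strings: with PFC enabled on priority 3 this
 * exposes "rx_prio3_pause", "rx_prio3_pause_duration", ..., and with global
 * pause enabled it additionally exposes "rx_global_pause", "tx_global_pause",
 * and so on, per the format strings above.
 */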
1682 static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
u8 *data, int idx)
{
1686 unsigned long pfc_combined;
int i, prio;
1689 pfc_combined = mlx5e_query_pfc_combined(priv);
1690 for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1691 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1692 char pfc_string[ETH_GSTRING_LEN];
1694 snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
1695 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1696 pport_per_prio_pfc_stats_desc[i].format, pfc_string);
}
}
1700 if (mlx5e_query_global_pause_combined(priv)) {
1701 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1702 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1703 pport_per_prio_pfc_stats_desc[i].format, "global");
}
}
1707 for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1708 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1709 pport_pfc_stall_stats_desc[i].format);
return idx;
}
1714 static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
u64 *data, int idx)
{
1718 unsigned long pfc_combined;
int i, prio;
1721 pfc_combined = mlx5e_query_pfc_combined(priv);
1722 for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1723 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
data[idx++] =
1725 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1726 pport_per_prio_pfc_stats_desc, i);
}
}
1730 if (mlx5e_query_global_pause_combined(priv)) {
1731 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
data[idx++] =
1733 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1734 pport_per_prio_pfc_stats_desc, i);
}
}
1738 for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1739 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1740 pport_pfc_stall_stats_desc, i);
return idx;
}
1745 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
1747 return mlx5e_grp_per_prio_traffic_get_num_stats() +
1748 mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}
1751 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
1753 idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
1754 idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
return idx;
}
1758 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
1760 idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
1761 idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
return idx;
}
1765 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
1767 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1768 struct mlx5_core_dev *mdev = priv->mdev;
1769 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1770 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
int prio;
void *out;
1774 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
return;
1777 MLX5_SET(ppcnt_reg, in, local_port, 1);
1778 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
1779 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1780 out = pstats->per_prio_counters[prio];
1781 MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1782 mlx5_core_access_reg(mdev, in, sz, out, sz,
1783 MLX5_REG_PPCNT, 0, 0);
}
}
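/* Port module event (PME) counters: module status and error event counts kept
 * by the core driver and read via mlx5_get_pme_stats().
 */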
1787 static const struct counter_desc mlx5e_pme_status_desc[] = {
1788 { "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};
1791 static const struct counter_desc mlx5e_pme_error_desc[] = {
1792 { "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
1793 { "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
1794 { "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};
1797 #define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
1798 #define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
1800 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
1802 return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}
1805 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
int i;
1809 for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1810 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1812 for (i = 0; i < NUM_PME_ERR_STATS; i++)
1813 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
return idx;
}
1818 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
1820 struct mlx5_pme_stats pme_stats;
int i;
1823 mlx5_get_pme_stats(priv->mdev, &pme_stats);
1825 for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1826 data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
1827 mlx5e_pme_status_desc, i);
1829 for (i = 0; i < NUM_PME_ERR_STATS; i++)
1830 data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
1831 mlx5e_pme_error_desc, i);
return idx;
}
1836 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
1838 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
1840 return mlx5e_tls_get_count(priv);
}
1843 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
1845 return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
1848 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
1850 return idx + mlx5e_tls_get_stats(priv, data + idx);
}
1853 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
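/* Per-ring software counters. The descriptors below are templates: the
 * 'channels', 'ptp' and 'qos' groups instantiate them per channel, per TC or
 * per QoS queue when filling the ethtool strings and values.
 */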
1855 static const struct counter_desc rq_stats_desc[] = {
1856 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
1857 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
1858 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
1859 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
1860 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
1861 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1862 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1863 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
1864 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
1865 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1866 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
1867 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
1868 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
1869 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
1870 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
1871 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
1872 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
1873 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
1874 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1875 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
1876 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1877 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1878 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1879 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1880 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1881 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
1882 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
1883 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
1884 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
1885 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
1886 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
1887 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
1888 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
1889 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
1890 #ifdef CONFIG_MLX5_EN_TLS
1891 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
1892 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
1893 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
1894 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
1895 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
1896 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
1897 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
1898 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
1899 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
1900 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};
1904 static const struct counter_desc sq_stats_desc[] = {
1905 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
1906 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
1907 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
1908 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
1909 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
1910 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
1911 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
1912 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
1913 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
1914 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
1915 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
1916 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
1917 #ifdef CONFIG_MLX5_EN_TLS
1918 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
1919 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
1920 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
1921 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
1922 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
1923 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
1924 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
1925 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
1926 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
1928 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
1929 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
1930 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
1931 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
1932 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
1933 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
1934 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
1935 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
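/* Two XDP SQ counter sets share the mlx5e_xdpsq_stats layout:
 * rq_xdpsq_stats_desc covers the XDP SQ paired with an RQ, xdpsq_stats_desc
 * the per-channel XDP SQ.
 */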
1938 static const struct counter_desc rq_xdpsq_stats_desc[] = {
1939 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1940 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1941 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1942 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
1943 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
1944 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
1945 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1948 static const struct counter_desc xdpsq_stats_desc[] = {
1949 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1950 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1951 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1952 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
1953 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
1954 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
1955 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1958 static const struct counter_desc xskrq_stats_desc[] = {
1959 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
1960 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
1961 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
1962 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1963 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1964 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
1965 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
1966 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1967 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
1968 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1969 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
1970 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1971 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1972 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1973 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1974 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1975 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
1976 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
1977 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};
1980 static const struct counter_desc xsksq_stats_desc[] = {
1981 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1982 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1983 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1984 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
1985 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
1986 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1989 static const struct counter_desc ch_stats_desc[] = {
1990 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
1991 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
1992 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
1993 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
1994 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
1995 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
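/* PTP channel counters: the dedicated PTP channel reuses the regular CH/SQ/RQ
 * stats structures and adds CQ-level counters for timestamp delivery errors.
 */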
1998 static const struct counter_desc ptp_sq_stats_desc[] = {
1999 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
2000 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
2001 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2002 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2003 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2004 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
2005 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2006 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
2007 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
2008 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2009 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
2010 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
2011 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
2012 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
2015 static const struct counter_desc ptp_ch_stats_desc[] = {
2016 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
2017 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
2018 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
2019 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
2022 static const struct counter_desc ptp_cq_stats_desc[] = {
2023 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
2024 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
2025 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
2026 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};
2029 static const struct counter_desc ptp_rq_stats_desc[] = {
2030 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
2031 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
2032 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2033 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
2034 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
2035 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2036 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2037 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
2038 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2039 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2040 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
2041 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
2042 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2043 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2044 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2045 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2046 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2047 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2048 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2049 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2050 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2051 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
2052 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
2053 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
2054 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
2055 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
2056 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2057 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
2058 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
};
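/* HTB/QoS SQ counters: same mlx5e_sq_stats layout as regular TX queues,
 * formatted per QoS queue id by the 'qos' group below.
 */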
2061 static const struct counter_desc qos_sq_stats_desc[] = {
2062 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
2063 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
2064 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2065 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2066 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2067 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2068 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2069 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2070 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2071 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
2072 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2073 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2074 #ifdef CONFIG_MLX5_EN_TLS
2075 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2076 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2077 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2078 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2079 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2080 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2081 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2082 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2083 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
2085 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2086 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
2087 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
2088 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2089 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
2090 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
2091 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
2092 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
2095 #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
2096 #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
2097 #define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
2098 #define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
2099 #define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc)
2100 #define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
2101 #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
2102 #define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
2103 #define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
2104 #define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
2105 #define NUM_PTP_RQ_STATS ARRAY_SIZE(ptp_rq_stats_desc)
2106 #define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)
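/* The QoS group is sized dynamically: mlx5e_open_qos_sq publishes new SQ
 * stats with smp_store_release, so all readers below pair with
 * smp_load_acquire before dereferencing the per-queue stats pointers.
 */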
2108 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
2110 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2111 return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
}
2114 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
2116 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2117 u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
int i, qid;
2120 for (qid = 0; qid < max_qos_sqs; qid++)
2121 for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2122 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2123 qos_sq_stats_desc[i].format, qid);
return idx;
}
2128 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
{
2130 struct mlx5e_sq_stats **stats;
u16 max_qos_sqs;
int i, qid;
2134 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2135 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
2136 stats = READ_ONCE(priv->htb.qos_sq_stats);
2138 for (qid = 0; qid < max_qos_sqs; qid++) {
2139 struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
2141 for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2142 data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
}
return idx;
}
2148 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
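/* PTP counters are reported only after the PTP TX and/or RX side has been
 * opened; get_num_stats, fill_strings and fill_stats apply the same checks so
 * the string and value arrays stay in sync.
 */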
2150 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
2152 int num = NUM_PTP_CH_STATS;
2154 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return 0;
2157 if (priv->tx_ptp_opened)
2158 num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
2159 if (priv->rx_ptp_opened)
2160 num += NUM_PTP_RQ_STATS;
return num;
}
2165 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
int i, tc;
2169 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return idx;
2172 for (i = 0; i < NUM_PTP_CH_STATS; i++)
2173 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2174 "%s", ptp_ch_stats_desc[i].format);
2176 if (priv->tx_ptp_opened) {
2177 for (tc = 0; tc < priv->max_opened_tc; tc++)
2178 for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2179 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2180 ptp_sq_stats_desc[i].format, tc);
2182 for (tc = 0; tc < priv->max_opened_tc; tc++)
2183 for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2184 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2185 ptp_cq_stats_desc[i].format, tc);
}
2187 if (priv->rx_ptp_opened) {
2188 for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2189 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2190 ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
}
return idx;
}
2195 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
int i, tc;
2199 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return idx;
2202 for (i = 0; i < NUM_PTP_CH_STATS; i++)
data[idx++] =
2204 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
2205 ptp_ch_stats_desc, i);
2207 if (priv->tx_ptp_opened) {
2208 for (tc = 0; tc < priv->max_opened_tc; tc++)
2209 for (i = 0; i < NUM_PTP_SQ_STATS; i++)
data[idx++] =
2211 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
2212 ptp_sq_stats_desc, i);
2214 for (tc = 0; tc < priv->max_opened_tc; tc++)
2215 for (i = 0; i < NUM_PTP_CQ_STATS; i++)
data[idx++] =
2217 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
2218 ptp_cq_stats_desc, i);
}
2220 if (priv->rx_ptp_opened) {
2221 for (i = 0; i < NUM_PTP_RQ_STATS; i++)
data[idx++] =
2223 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
2224 ptp_rq_stats_desc, i);
}
return idx;
}
2229 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
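/* Channel group sizing example: with 8 channels, one TC and XSK never used,
 * this is 8 * (NUM_CH_STATS + NUM_RQ_STATS + NUM_RQ_XDPSQ_STATS +
 * NUM_XDPSQ_STATS) + 8 * NUM_SQ_STATS entries, and no XSK entries.
 */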
2231 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
2233 int max_nch = priv->stats_nch;
2235 return (NUM_RQ_STATS * max_nch) +
2236 (NUM_CH_STATS * max_nch) +
2237 (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
2238 (NUM_RQ_XDPSQ_STATS * max_nch) +
2239 (NUM_XDPSQ_STATS * max_nch) +
2240 (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
2241 (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}
2244 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
2246 bool is_xsk = priv->xsk.ever_used;
2247 int max_nch = priv->stats_nch;
int i, j, tc;
2250 for (i = 0; i < max_nch; i++)
2251 for (j = 0; j < NUM_CH_STATS; j++)
2252 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2253 ch_stats_desc[j].format, i);
2255 for (i = 0; i < max_nch; i++) {
2256 for (j = 0; j < NUM_RQ_STATS; j++)
2257 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2258 rq_stats_desc[j].format, i);
2259 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2260 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2261 xskrq_stats_desc[j].format, i);
2262 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2263 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2264 rq_xdpsq_stats_desc[j].format, i);
}
2267 for (tc = 0; tc < priv->max_opened_tc; tc++)
2268 for (i = 0; i < max_nch; i++)
2269 for (j = 0; j < NUM_SQ_STATS; j++)
2270 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2271 sq_stats_desc[j].format,
i + tc * max_nch);
2274 for (i = 0; i < max_nch; i++) {
2275 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2276 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2277 xsksq_stats_desc[j].format, i);
2278 for (j = 0; j < NUM_XDPSQ_STATS; j++)
2279 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2280 xdpsq_stats_desc[j].format, i);
}
return idx;
}
2286 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
2288 bool is_xsk = priv->xsk.ever_used;
2289 int max_nch = priv->stats_nch;
int i, j, tc;
2292 for (i = 0; i < max_nch; i++)
2293 for (j = 0; j < NUM_CH_STATS; j++)
data[idx++] =
2295 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
ch_stats_desc, j);
2298 for (i = 0; i < max_nch; i++) {
2299 for (j = 0; j < NUM_RQ_STATS; j++)
data[idx++] =
2301 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
rq_stats_desc, j);
2303 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
data[idx++] =
2305 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
2306 xskrq_stats_desc, j);
2307 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
data[idx++] =
2309 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
2310 rq_xdpsq_stats_desc, j);
}
2313 for (tc = 0; tc < priv->max_opened_tc; tc++)
2314 for (i = 0; i < max_nch; i++)
2315 for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] =
2317 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
sq_stats_desc, j);
2320 for (i = 0; i < max_nch; i++) {
2321 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
data[idx++] =
2323 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
2324 xsksq_stats_desc, j);
2325 for (j = 0; j < NUM_XDPSQ_STATS; j++)
data[idx++] =
2327 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
2328 xdpsq_stats_desc, j);
}
return idx;
}
2334 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
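/* Stats group definitions. Groups flagged with MLX5E_NDO_UPDATE_STATS are
 * refreshed for the netdev (ndo) stats path in addition to the regular
 * update.
 */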
2336 MLX5E_DEFINE_STATS_GRP(sw, 0);
2337 MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
2338 MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
2339 MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
2340 MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
2341 MLX5E_DEFINE_STATS_GRP(2863, 0);
2342 MLX5E_DEFINE_STATS_GRP(2819, 0);
2343 MLX5E_DEFINE_STATS_GRP(phy, 0);
2344 MLX5E_DEFINE_STATS_GRP(pcie, 0);
2345 MLX5E_DEFINE_STATS_GRP(per_prio, 0);
2346 MLX5E_DEFINE_STATS_GRP(pme, 0);
2347 MLX5E_DEFINE_STATS_GRP(channels, 0);
2348 MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
2349 MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
2350 static MLX5E_DEFINE_STATS_GRP(tls, 0);
2351 static MLX5E_DEFINE_STATS_GRP(ptp, 0);
2352 static MLX5E_DEFINE_STATS_GRP(qos, 0);
2354 /* The stats groups order is opposite to the update_stats() order calls */
2355 mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
2356 &MLX5E_STATS_GRP(sw),
2357 &MLX5E_STATS_GRP(qcnt),
2358 &MLX5E_STATS_GRP(vnic_env),
2359 &MLX5E_STATS_GRP(vport),
2360 &MLX5E_STATS_GRP(802_3),
2361 &MLX5E_STATS_GRP(2863),
2362 &MLX5E_STATS_GRP(2819),
2363 &MLX5E_STATS_GRP(phy),
2364 &MLX5E_STATS_GRP(eth_ext),
2365 &MLX5E_STATS_GRP(pcie),
2366 &MLX5E_STATS_GRP(per_prio),
2367 &MLX5E_STATS_GRP(pme),
2368 #ifdef CONFIG_MLX5_EN_IPSEC
2369 &MLX5E_STATS_GRP(ipsec_sw),
2370 &MLX5E_STATS_GRP(ipsec_hw),
#endif
2372 &MLX5E_STATS_GRP(tls),
2373 &MLX5E_STATS_GRP(channels),
2374 &MLX5E_STATS_GRP(per_port_buff_congest),
2375 &MLX5E_STATS_GRP(ptp),
2376 &MLX5E_STATS_GRP(qos),
};
2379 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
2381 return ARRAY_SIZE(mlx5e_nic_stats_grps);
}