/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35 #include "en_accel/tls.h"
36 #include "en_accel/en_accel.h"
38 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
40 return !priv->profile->stats_grps_num ? 0 :
41 priv->profile->stats_grps_num(priv);
44 unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
46 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
47 const unsigned int num_stats_grps = stats_grps_num(priv);
48 unsigned int total = 0;
51 for (i = 0; i < num_stats_grps; i++)
52 total += stats_grps[i]->get_num_stats(priv);
57 void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
59 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
60 const unsigned int num_stats_grps = stats_grps_num(priv);
63 for (i = num_stats_grps - 1; i >= 0; i--)
64 if (stats_grps[i]->update_stats &&
65 stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
66 stats_grps[i]->update_stats(priv);
69 void mlx5e_stats_update(struct mlx5e_priv *priv)
71 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
72 const unsigned int num_stats_grps = stats_grps_num(priv);
75 for (i = num_stats_grps - 1; i >= 0; i--)
76 if (stats_grps[i]->update_stats)
77 stats_grps[i]->update_stats(priv);
80 void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
82 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
83 const unsigned int num_stats_grps = stats_grps_num(priv);
86 for (i = 0; i < num_stats_grps; i++)
87 idx = stats_grps[i]->fill_stats(priv, data, idx);
90 void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
92 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
93 const unsigned int num_stats_grps = stats_grps_num(priv);
96 for (i = 0; i < num_stats_grps; i++)
97 idx = stats_grps[i]->fill_strings(priv, data, idx);
100 /* Concrete NIC Stats */
102 static const struct counter_desc sw_stats_desc[] = {
103 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
104 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
105 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
106 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
107 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
108 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
109 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
110 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
111 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
112 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
113 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
114 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },
116 #ifdef CONFIG_MLX5_EN_TLS
117 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
118 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
119 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
120 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
121 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
122 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
123 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
124 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
125 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
126 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
129 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
130 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
131 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
132 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
133 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
134 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
135 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
136 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
137 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
138 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
139 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
140 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
141 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
142 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
143 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
144 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
145 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
146 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
147 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
148 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
149 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
150 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
151 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
152 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
153 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
154 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
155 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
156 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
157 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
158 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
159 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
160 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
161 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
162 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
163 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
164 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
165 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
166 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
167 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
168 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
169 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
170 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
171 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
172 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
173 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
174 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
175 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
176 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
177 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
178 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
179 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
180 #ifdef CONFIG_MLX5_EN_TLS
181 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
182 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
183 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
184 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
185 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
186 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
187 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
188 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
189 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
190 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
191 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
193 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
194 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
195 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
196 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
197 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
198 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
199 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
200 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
201 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
202 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
203 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
204 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
205 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
206 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
207 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
208 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
209 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
210 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
211 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
212 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
213 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
214 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
215 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
216 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
217 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
218 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
219 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
220 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
221 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
222 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
223 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
226 #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
228 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
230 return NUM_SW_COUNTERS;
233 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
237 for (i = 0; i < NUM_SW_COUNTERS; i++)
238 strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
242 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
246 for (i = 0; i < NUM_SW_COUNTERS; i++)
247 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
251 static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
252 struct mlx5e_xdpsq_stats *xdpsq_red_stats)
254 s->tx_xdp_xmit += xdpsq_red_stats->xmit;
255 s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
256 s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
257 s->tx_xdp_nops += xdpsq_red_stats->nops;
258 s->tx_xdp_full += xdpsq_red_stats->full;
259 s->tx_xdp_err += xdpsq_red_stats->err;
260 s->tx_xdp_cqes += xdpsq_red_stats->cqes;
263 static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
264 struct mlx5e_xdpsq_stats *xdpsq_stats)
266 s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
267 s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
268 s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
269 s->rx_xdp_tx_nops += xdpsq_stats->nops;
270 s->rx_xdp_tx_full += xdpsq_stats->full;
271 s->rx_xdp_tx_err += xdpsq_stats->err;
272 s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
275 static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
276 struct mlx5e_xdpsq_stats *xsksq_stats)
278 s->tx_xsk_xmit += xsksq_stats->xmit;
279 s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
280 s->tx_xsk_inlnw += xsksq_stats->inlnw;
281 s->tx_xsk_full += xsksq_stats->full;
282 s->tx_xsk_err += xsksq_stats->err;
283 s->tx_xsk_cqes += xsksq_stats->cqes;
286 static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
287 struct mlx5e_rq_stats *xskrq_stats)
289 s->rx_xsk_packets += xskrq_stats->packets;
290 s->rx_xsk_bytes += xskrq_stats->bytes;
291 s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
292 s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
293 s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
294 s->rx_xsk_csum_none += xskrq_stats->csum_none;
295 s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
296 s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
297 s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
298 s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
299 s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
300 s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
301 s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
302 s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
303 s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
304 s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
305 s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
306 s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
307 s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
310 static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
311 struct mlx5e_rq_stats *rq_stats)
313 s->rx_packets += rq_stats->packets;
314 s->rx_bytes += rq_stats->bytes;
315 s->rx_lro_packets += rq_stats->lro_packets;
316 s->rx_lro_bytes += rq_stats->lro_bytes;
317 s->rx_ecn_mark += rq_stats->ecn_mark;
318 s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
319 s->rx_csum_none += rq_stats->csum_none;
320 s->rx_csum_complete += rq_stats->csum_complete;
321 s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
322 s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
323 s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
324 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
325 s->rx_xdp_drop += rq_stats->xdp_drop;
326 s->rx_xdp_redirect += rq_stats->xdp_redirect;
327 s->rx_wqe_err += rq_stats->wqe_err;
328 s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
329 s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
330 s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
331 s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
332 s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
333 s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
334 s->rx_cache_reuse += rq_stats->cache_reuse;
335 s->rx_cache_full += rq_stats->cache_full;
336 s->rx_cache_empty += rq_stats->cache_empty;
337 s->rx_cache_busy += rq_stats->cache_busy;
338 s->rx_cache_waive += rq_stats->cache_waive;
339 s->rx_congst_umr += rq_stats->congst_umr;
340 s->rx_arfs_err += rq_stats->arfs_err;
341 s->rx_recover += rq_stats->recover;
342 #ifdef CONFIG_MLX5_EN_TLS
343 s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
344 s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
345 s->rx_tls_ctx += rq_stats->tls_ctx;
346 s->rx_tls_del += rq_stats->tls_del;
347 s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
348 s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
349 s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
350 s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
351 s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
352 s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
353 s->rx_tls_err += rq_stats->tls_err;
357 static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
358 struct mlx5e_ch_stats *ch_stats)
360 s->ch_events += ch_stats->events;
361 s->ch_poll += ch_stats->poll;
362 s->ch_arm += ch_stats->arm;
363 s->ch_aff_change += ch_stats->aff_change;
364 s->ch_force_irq += ch_stats->force_irq;
365 s->ch_eq_rearm += ch_stats->eq_rearm;
368 static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
369 struct mlx5e_sq_stats *sq_stats)
371 s->tx_packets += sq_stats->packets;
372 s->tx_bytes += sq_stats->bytes;
373 s->tx_tso_packets += sq_stats->tso_packets;
374 s->tx_tso_bytes += sq_stats->tso_bytes;
375 s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
376 s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
377 s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
378 s->tx_nop += sq_stats->nop;
379 s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
380 s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
381 s->tx_queue_stopped += sq_stats->stopped;
382 s->tx_queue_wake += sq_stats->wake;
383 s->tx_queue_dropped += sq_stats->dropped;
384 s->tx_cqe_err += sq_stats->cqe_err;
385 s->tx_recover += sq_stats->recover;
386 s->tx_xmit_more += sq_stats->xmit_more;
387 s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
388 s->tx_csum_none += sq_stats->csum_none;
389 s->tx_csum_partial += sq_stats->csum_partial;
390 #ifdef CONFIG_MLX5_EN_TLS
391 s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
392 s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
393 s->tx_tls_ctx += sq_stats->tls_ctx;
394 s->tx_tls_ooo += sq_stats->tls_ooo;
395 s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
396 s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
397 s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
398 s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
399 s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
400 s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
402 s->tx_cqes += sq_stats->cqes;
405 static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
406 struct mlx5e_sw_stats *s)
410 if (!priv->port_ptp_opened)
413 mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch);
415 for (i = 0; i < priv->max_opened_tc; i++) {
416 mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]);
418 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
423 static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
424 struct mlx5e_sw_stats *s)
426 struct mlx5e_sq_stats **stats;
430 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
431 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
432 stats = READ_ONCE(priv->htb.qos_sq_stats);
434 for (i = 0; i < max_qos_sqs; i++) {
435 mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
437 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
442 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
444 struct mlx5e_sw_stats *s = &priv->stats.sw;
447 memset(s, 0, sizeof(*s));
449 for (i = 0; i < priv->max_nch; i++) {
450 struct mlx5e_channel_stats *channel_stats =
451 &priv->channel_stats[i];
454 mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
455 mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
456 mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
458 mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
459 /* AF_XDP zero-copy */
460 mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
461 mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);
463 for (j = 0; j < priv->max_opened_tc; j++) {
464 mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);
466 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
470 mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
471 mlx5e_stats_grp_sw_update_stats_qos(priv, s);
474 static const struct counter_desc q_stats_desc[] = {
475 { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
478 static const struct counter_desc drop_rq_stats_desc[] = {
479 { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
482 #define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
483 #define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
485 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
490 num_stats += NUM_Q_COUNTERS;
492 if (priv->drop_rq_q_counter)
493 num_stats += NUM_DROP_RQ_COUNTERS;
498 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
502 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
503 strcpy(data + (idx++) * ETH_GSTRING_LEN,
504 q_stats_desc[i].format);
506 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
507 strcpy(data + (idx++) * ETH_GSTRING_LEN,
508 drop_rq_stats_desc[i].format);
513 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
517 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
518 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
520 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
521 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
522 drop_rq_stats_desc, i);
526 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
528 struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
529 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
530 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
533 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
535 if (priv->q_counter) {
536 MLX5_SET(query_q_counter_in, in, counter_set_id,
538 ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
540 qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
544 if (priv->drop_rq_q_counter) {
545 MLX5_SET(query_q_counter_in, in, counter_set_id,
546 priv->drop_rq_q_counter);
547 ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
549 qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
554 #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
555 static const struct counter_desc vnic_env_stats_steer_desc[] = {
556 { "rx_steer_missed_packets",
557 VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
560 static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
561 { "dev_internal_queue_oob",
562 VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
565 #define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
566 (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
567 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
568 #define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
569 (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
570 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
572 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
574 return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
575 NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
578 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
582 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
583 strcpy(data + (idx++) * ETH_GSTRING_LEN,
584 vnic_env_stats_steer_desc[i].format);
586 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
587 strcpy(data + (idx++) * ETH_GSTRING_LEN,
588 vnic_env_stats_dev_oob_desc[i].format);
592 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
596 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
597 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
598 vnic_env_stats_steer_desc, i);
600 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
601 data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
602 vnic_env_stats_dev_oob_desc, i);
606 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
608 u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
609 u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
610 struct mlx5_core_dev *mdev = priv->mdev;
612 if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
615 MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
616 mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
619 #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
620 static const struct counter_desc vport_stats_desc[] = {
621 { "rx_vport_unicast_packets",
622 VPORT_COUNTER_OFF(received_eth_unicast.packets) },
623 { "rx_vport_unicast_bytes",
624 VPORT_COUNTER_OFF(received_eth_unicast.octets) },
625 { "tx_vport_unicast_packets",
626 VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
627 { "tx_vport_unicast_bytes",
628 VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
629 { "rx_vport_multicast_packets",
630 VPORT_COUNTER_OFF(received_eth_multicast.packets) },
631 { "rx_vport_multicast_bytes",
632 VPORT_COUNTER_OFF(received_eth_multicast.octets) },
633 { "tx_vport_multicast_packets",
634 VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
635 { "tx_vport_multicast_bytes",
636 VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
637 { "rx_vport_broadcast_packets",
638 VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
639 { "rx_vport_broadcast_bytes",
640 VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
641 { "tx_vport_broadcast_packets",
642 VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
643 { "tx_vport_broadcast_bytes",
644 VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
645 { "rx_vport_rdma_unicast_packets",
646 VPORT_COUNTER_OFF(received_ib_unicast.packets) },
647 { "rx_vport_rdma_unicast_bytes",
648 VPORT_COUNTER_OFF(received_ib_unicast.octets) },
649 { "tx_vport_rdma_unicast_packets",
650 VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
651 { "tx_vport_rdma_unicast_bytes",
652 VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
653 { "rx_vport_rdma_multicast_packets",
654 VPORT_COUNTER_OFF(received_ib_multicast.packets) },
655 { "rx_vport_rdma_multicast_bytes",
656 VPORT_COUNTER_OFF(received_ib_multicast.octets) },
657 { "tx_vport_rdma_multicast_packets",
658 VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
659 { "tx_vport_rdma_multicast_bytes",
660 VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
663 #define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
665 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
667 return NUM_VPORT_COUNTERS;
670 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
674 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
675 strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
679 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
683 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
684 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
685 vport_stats_desc, i);
689 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
691 u32 *out = (u32 *)priv->stats.vport.query_vport_out;
692 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
693 struct mlx5_core_dev *mdev = priv->mdev;
695 MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
696 mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
699 #define PPORT_802_3_OFF(c) \
700 MLX5_BYTE_OFF(ppcnt_reg, \
701 counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
702 static const struct counter_desc pport_802_3_stats_desc[] = {
703 { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
704 { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
705 { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
706 { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
707 { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
708 { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
709 { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
710 { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
711 { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
712 { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
713 { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
714 { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
715 { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
716 { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
717 { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
718 { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
719 { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
720 { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
723 #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
725 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
727 return NUM_PPORT_802_3_COUNTERS;
730 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
734 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
735 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
739 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
743 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
744 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
745 pport_802_3_stats_desc, i);
749 #define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
750 (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
752 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
754 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
755 struct mlx5_core_dev *mdev = priv->mdev;
756 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
757 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
760 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
763 MLX5_SET(ppcnt_reg, in, local_port, 1);
764 out = pstats->IEEE_802_3_counters;
765 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
766 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
769 #define MLX5E_READ_CTR64_BE_F(ptr, c) \
770 be64_to_cpu(*(__be64 *)((char *)ptr + \
771 MLX5_BYTE_OFF(ppcnt_reg, \
772 counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)))
774 void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
775 struct ethtool_pause_stats *pause_stats)
777 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
778 struct mlx5_core_dev *mdev = priv->mdev;
779 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
780 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
782 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
785 MLX5_SET(ppcnt_reg, in, local_port, 1);
786 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
787 mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
788 sz, MLX5_REG_PPCNT, 0, 0);
790 pause_stats->tx_pause_frames =
791 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
792 a_pause_mac_ctrl_frames_transmitted);
793 pause_stats->rx_pause_frames =
794 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
795 a_pause_mac_ctrl_frames_received);
/* RFC 2863 (interfaces-group MIB) port counters, carried in the PPCNT
 * register.  Offsets point at the _high dword of each 64-bit big-endian
 * counter (read via MLX5E_READ_CTR64_BE).
 */
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)

/* Number of ethtool stats this group contributes. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

/* Copy counter names into the ethtool strings area; returns the next idx. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

/* Copy cached counter values into the ethtool data area; returns next idx. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

/* Refresh the cached RFC 2863 counter set from the PPCNT register. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
/* RFC 2819 (RMON MIB) port counters: runt/jabber errors and the RX frame
 * size histogram, carried in the PPCNT register as 64-bit big-endian
 * values.
 */
#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)

/* Number of ethtool stats this group contributes. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

/* Copy counter names into the ethtool strings area; returns the next idx. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

/* Copy cached counter values into the ethtool data area; returns next idx. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

/* Refresh the cached RFC 2819 counter set from the PPCNT register;
 * skipped entirely when basic PPCNT is not supported.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
/* Physical-layer statistical counters (PPCNT statistical group).  The
 * per-lane descriptors are only exposed when the PCAM advertises
 * per_lane_error_counters.
 */
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

/* Count: 1 fixed counter plus capability-gated statistical/per-lane sets. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
	return num_stats;
}

/* Strings must be emitted in the same order/conditions as NUM_STATS above. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);
	return idx;
}

/* Values must match FILL_STRS ordering exactly. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

/* Refresh the cached physical-layer groups; the statistical group is only
 * queried when the PCAM capability is present.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
/* Ethernet extended counters (PPCNT extended group), gated on the
 * rx_buffer_fullness_counters PCAM capability.
 */
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)

/* Group size is zero when the capability is absent. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

/* Copy counter names into the ethtool strings area; returns the next idx. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

/* Copy cached counter values into the ethtool data area; returns next idx. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}

/* Refresh the cached extended counter set from the PPCNT register. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
/* PCIe performance counters, carried in the MPCNT (management PCIe
 * counters) register.  Three sub-sets, each gated on its own MCAM
 * capability: 32-bit perf counters, a 64-bit overflow counter, and
 * 32-bit outbound-stall counters.
 */
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)

/* Sum the sizes of whichever sub-sets the MCAM advertises. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

/* Strings follow the same capability gating/order as NUM_STATS. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

/* Values match FILL_STRS ordering; note the 32-bit vs 64-bit readers. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

/* Refresh the cached PCIe performance group from the MPCNT register. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}
/* Per-traffic-class buffer discard and congestion (WRED/ECN) counters.
 * Both sub-groups are gated on the sbcam_reg general capability and
 * expose one formatted counter name per priority (the "%d" in the
 * format strings is filled with the prio number).
 */
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

/* Per-TC prio sub-group size: zero without sbcam_reg support. */
static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

/* Emit prio-interleaved names for both sub-groups; returns the next idx. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}

/* Emit cached values in the same prio-interleaved order as FILL_STRS. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}

/* Query the per-TC counter group once per priority (prio_tc selects it). */
static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

/* Congestion sub-group size: zero without sbcam_reg support. */
static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

/* Query the per-TC congestion group once per priority. */
static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

/* Group size is the sum of the two sub-groups above. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

/* Refresh both per-TC sub-groups. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}
/* Per-priority traffic and PFC (priority flow control) counters from the
 * PPCNT per-priority group.  Traffic counters are emitted per priority;
 * PFC counters are emitted only for priorities with PFC enabled, plus an
 * optional "global" set when plain pause is configured.
 */
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

/* Traffic sub-group size: fixed per-priority counters. */
static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

/* Emit one formatted name per (prio, counter); returns the next idx. */
static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

/* Emit cached values in the same (prio, counter) order as the strings. */
static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
/* Stall counters only exist when both pfcc_mask and stall_detect are set
 * (the capability bits multiply the array size down to zero otherwise).
 */
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))

/* Bitmask of priorities with PFC enabled in either direction; 0 for
 * non-Ethernet ports or on query failure.
 */
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}

/* True when global (non-PFC) pause is enabled in either direction. */
static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}

/* PFC sub-group size: one counter set per enabled prio, optionally one
 * "global" set, plus the capability-gated stall counters.
 */
static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

/* Emit names in the same gating/order as get_num_stats above. */
static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

/* Emit values matching fill_strings; "global" and the stall counters are
 * read from the prio-0 counter set.
 */
static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}

/* Group ops delegate to the traffic and PFC sub-group helpers. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}

/* Refresh the cached per-priority group, one PPCNT query per priority. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}
/* Port module event (PME) counters.  The offsets here are indices into
 * the mlx5_pme_stats u64 arrays (hence sizeof(u64) * event-id), read via
 * MLX5E_READ_CTR64_CPU — host-endian, not register big-endian.
 */
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)

/* Number of ethtool stats this group contributes. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

/* Copy counter names into the ethtool strings area; returns the next idx. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

/* Values are fetched on demand from the core PME stats (no cached copy,
 * so UPDATE_STATS below is a no-op).
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
/* TLS offload stats: all four ops delegate to the en_accel/tls helpers;
 * fill ops return the advanced idx.  No cached copy to refresh, so
 * UPDATE_STATS is a no-op.
 */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_tls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
/* Per-RQ software counters exposed through ethtool (one instance per
 * channel); TLS RX entries only when TLS offload is built in.
 */
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};
/* Per-SQ software counters exposed through ethtool (one instance per
 * TXQ); TLS TX entries only when TLS offload is built in.
 */
static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
/* XDP transmit-queue counters: rq_xdpsq_* for the XDP_TX queue attached
 * to an RQ, xdpsq_* for the XDP_REDIRECT queue (same fields, different
 * ethtool name prefixes via the DECLARE macros).
 */
static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
/* AF_XDP (XSK) queue counters: a subset of the regular RQ fields for the
 * XSK RQ, and xdpsq-style fields for the XSK SQ.
 */
static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};

static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
1723 static const struct counter_desc ch_stats_desc[] = {
1724 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
1725 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
1726 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
1727 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
1728 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
1729 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
/* Ethtool counter descriptors for the PTP (port timestamping) send
 * queues. Reuses fields of the generic struct mlx5e_sq_stats but with
 * PTP-specific string formats via MLX5E_DECLARE_PTP_TX_STAT.
 * NOTE(review): closing "};" not visible in this listing.
 */
1732 static const struct counter_desc ptp_sq_stats_desc[] = {
1733 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
1734 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
1735 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
1736 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
1737 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
1738 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
1739 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
1740 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
1741 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
1742 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
1743 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
1744 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
1745 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
1746 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
/* Ethtool counter descriptors for the PTP channel's event statistics
 * (subset of struct mlx5e_ch_stats, PTP-specific string formats).
 * NOTE(review): closing "};" not visible in this listing.
 */
1749 static const struct counter_desc ptp_ch_stats_desc[] = {
1750 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
1751 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
1752 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
1753 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
/* Ethtool counter descriptors for the PTP completion queues
 * (struct mlx5e_ptp_cq_stats): CQE counts, error CQEs, aborted
 * timestamp matches and the absolute-difference accumulator.
 * NOTE(review): closing "};" not visible in this listing.
 */
1756 static const struct counter_desc ptp_cq_stats_desc[] = {
1757 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
1758 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
1759 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
1760 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
/* Ethtool counter descriptors for QoS (HTB) send queues. Same field
 * set as the regular SQ stats (struct mlx5e_sq_stats) but with
 * QoS-specific string formats via MLX5E_DECLARE_QOS_TX_STAT.
 * The TLS counters are compiled in only under CONFIG_MLX5_EN_TLS.
 * NOTE(review): the matching "#endif" (and the closing "};") are not
 * visible in this numbered listing — dropped by the extraction.
 */
1763 static const struct counter_desc qos_sq_stats_desc[] = {
1764 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
1765 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
1766 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
1767 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
1768 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
1769 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
1770 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
1771 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
1772 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
1773 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
1774 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
1775 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
1776 #ifdef CONFIG_MLX5_EN_TLS
1777 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
1778 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
1779 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
1780 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
1781 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
1782 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
1783 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
1784 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
1785 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
1786 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
1788 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
1789 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
1790 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
1791 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
1792 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
1793 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
1794 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
1795 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
/* Element counts of the descriptor tables above. These are the
 * per-queue/per-channel multipliers used by the NUM_STATS, FILL_STRS
 * and FILL_STATS group ops below.
 */
1798 #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
1799 #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
1800 #define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
1801 #define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
1802 #define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc)
1803 #define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
1804 #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
1805 #define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
1806 #define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
1807 #define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
1808 #define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)
/* QoS group: number of ethtool counters = counters-per-SQ times the
 * number of QoS SQ stats slots ever allocated (htb.max_qos_sqs).
 * NOTE(review): the function's "{" / "}" lines are not visible in
 * this numbered listing.
 */
1810 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
1812 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
1813 return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
/* QoS group: write one ethtool string per (qid, counter) pair, using
 * the printf-style format from qos_sq_stats_desc with the queue id.
 * The acquire load pairs with the release store in mlx5e_open_qos_sq,
 * so the per-SQ data published before the count is visible here.
 * NOTE(review): function braces and the declarations of qid/i/idx are
 * not visible in this numbered listing.
 */
1816 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
1818 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
1819 u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
1822 for (qid = 0; qid < max_qos_sqs; qid++)
1823 for (i = 0; i < NUM_QOS_SQ_STATS; i++)
1824 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1825 qos_sq_stats_desc[i].format, qid);
/* QoS group: copy each QoS SQ's 64-bit counters into the ethtool data
 * array. READ_ONCE on the stats array and on each per-qid pointer
 * guards against concurrent publication; the acquire on max_qos_sqs
 * pairs with the release in mlx5e_open_qos_sq so entries below that
 * count are safe to dereference.
 * NOTE(review): function braces and some declaration lines are not
 * visible in this numbered listing.
 */
1830 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
1832 struct mlx5e_sq_stats **stats;
1836 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
1837 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
1838 stats = READ_ONCE(priv->htb.qos_sq_stats);
1840 for (qid = 0; qid < max_qos_sqs; qid++) {
1841 struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
1843 for (i = 0; i < NUM_QOS_SQ_STATS; i++)
1844 data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
/* QoS group needs no periodic refresh: the update op is a no-op. */
1850 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
/* PTP group: counters exposed only when the PTP port channel has been
 * opened; per-TC SQ and CQ counters scale with max_opened_tc.
 * NOTE(review): this numbered listing is missing line(s) of the
 * ternary — the visible expression ends at ":" with no else arm, and
 * the channel-stats term appears truncated. Confirm against upstream
 * before relying on this count.
 */
1852 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
1854 return priv->port_ptp_opened ?
1856 ((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) :
/* PTP group: emit strings in the order channel stats, then per-TC SQ
 * stats, then per-TC CQ stats — must match ptp FILL_STATS below.
 * Nothing is emitted when the PTP port channel was never opened.
 * NOTE(review): function braces, the early-return body after the
 * port_ptp_opened check, and variable declarations are not visible in
 * this numbered listing.
 */
1860 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
1864 if (!priv->port_ptp_opened)
1867 for (i = 0; i < NUM_PTP_CH_STATS; i++)
1868 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1869 ptp_ch_stats_desc[i].format);
1871 for (tc = 0; tc < priv->max_opened_tc; tc++)
1872 for (i = 0; i < NUM_PTP_SQ_STATS; i++)
1873 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1874 ptp_sq_stats_desc[i].format, tc);
1876 for (tc = 0; tc < priv->max_opened_tc; tc++)
1877 for (i = 0; i < NUM_PTP_CQ_STATS; i++)
1878 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1879 ptp_cq_stats_desc[i].format, tc);
/* PTP group: copy counter values in the same order as FILL_STRS above
 * (channel stats, per-TC SQ stats, per-TC CQ stats) from
 * priv->port_ptp_stats. Skipped when PTP was never opened.
 * NOTE(review): function braces, the early-return body, and the
 * "data[idx++] =" lines preceding each MLX5E_READ_CTR64_CPU are not
 * visible in this numbered listing.
 */
1883 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
1887 if (!priv->port_ptp_opened)
1890 for (i = 0; i < NUM_PTP_CH_STATS; i++)
1892 MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch,
1893 ptp_ch_stats_desc, i);
1895 for (tc = 0; tc < priv->max_opened_tc; tc++)
1896 for (i = 0; i < NUM_PTP_SQ_STATS; i++)
1898 MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc],
1899 ptp_sq_stats_desc, i);
1901 for (tc = 0; tc < priv->max_opened_tc; tc++)
1902 for (i = 0; i < NUM_PTP_CQ_STATS; i++)
1904 MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc],
1905 ptp_cq_stats_desc, i);
/* PTP group needs no periodic refresh: the update op is a no-op. */
1910 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
/* Channels group: total counters across all channels — RQ, CH, per-TC
 * SQ, RQ-XDPSQ and XDPSQ counters for every channel, plus XSK RQ/SQ
 * counters only if an XSK socket was ever used (bool multiplier is
 * 0 or 1, so the XSK terms vanish otherwise).
 * NOTE(review): function braces are not visible in this listing.
 */
1912 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
1914 int max_nch = priv->max_nch;
1916 return (NUM_RQ_STATS * max_nch) +
1917 (NUM_CH_STATS * max_nch) +
1918 (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
1919 (NUM_RQ_XDPSQ_STATS * max_nch) +
1920 (NUM_XDPSQ_STATS * max_nch) +
1921 (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
1922 (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
/* Channels group: emit strings per channel in the order CH, then
 * RQ/XSKRQ/RQ-XDPSQ, then per-TC SQ, then XSKSQ/XDPSQ — must match
 * the channels FILL_STATS op. XSK loops use "NUM_* * is_xsk" so they
 * run zero times when XSK was never used.
 * NOTE(review): function braces, loop-variable declarations, and the
 * final sprintf argument of the per-TC SQ loop (the queue index
 * expression) are not visible in this numbered listing.
 */
1925 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
1927 bool is_xsk = priv->xsk.ever_used;
1928 int max_nch = priv->max_nch;
1931 for (i = 0; i < max_nch; i++)
1932 for (j = 0; j < NUM_CH_STATS; j++)
1933 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1934 ch_stats_desc[j].format, i);
1936 for (i = 0; i < max_nch; i++) {
1937 for (j = 0; j < NUM_RQ_STATS; j++)
1938 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1939 rq_stats_desc[j].format, i);
1940 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1941 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1942 xskrq_stats_desc[j].format, i);
1943 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1944 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1945 rq_xdpsq_stats_desc[j].format, i);
1948 for (tc = 0; tc < priv->max_opened_tc; tc++)
1949 for (i = 0; i < max_nch; i++)
1950 for (j = 0; j < NUM_SQ_STATS; j++)
1951 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1952 sq_stats_desc[j].format,
1955 for (i = 0; i < max_nch; i++) {
1956 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
1957 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1958 xsksq_stats_desc[j].format, i);
1959 for (j = 0; j < NUM_XDPSQ_STATS; j++)
1960 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1961 xdpsq_stats_desc[j].format, i);
/* Channels group: copy counter values from priv->channel_stats[] in
 * the same order as FILL_STRS above (CH, RQ/XSKRQ/RQ-XDPSQ, per-TC
 * SQ, XSKSQ/XDPSQ). XSK loops are skipped via the is_xsk multiplier
 * when XSK was never used.
 * NOTE(review): function braces, loop-variable declarations, and the
 * "data[idx++] =" lines preceding several MLX5E_READ_CTR64_CPU calls
 * are not visible in this numbered listing.
 */
1967 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
1969 bool is_xsk = priv->xsk.ever_used;
1970 int max_nch = priv->max_nch;
1973 for (i = 0; i < max_nch; i++)
1974 for (j = 0; j < NUM_CH_STATS; j++)
1976 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
1979 for (i = 0; i < max_nch; i++) {
1980 for (j = 0; j < NUM_RQ_STATS; j++)
1982 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
1984 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1986 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
1987 xskrq_stats_desc, j);
1988 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1990 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
1991 rq_xdpsq_stats_desc, j);
1994 for (tc = 0; tc < priv->max_opened_tc; tc++)
1995 for (i = 0; i < max_nch; i++)
1996 for (j = 0; j < NUM_SQ_STATS; j++)
1998 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
2001 for (i = 0; i < max_nch; i++) {
2002 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2004 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
2005 xsksq_stats_desc, j);
2006 for (j = 0; j < NUM_XDPSQ_STATS; j++)
2008 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
2009 xdpsq_stats_desc, j);
/* Channels group needs no periodic refresh: the update op is a no-op. */
2015 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
/* Instantiate the stats-group objects, binding each group's
 * num_stats/fill_strs/fill_stats/update_stats ops. The second macro
 * argument is a flags word; MLX5E_NDO_UPDATE_STATS presumably marks
 * groups also refreshed from mlx5e_stats_update_ndo_stats — confirm
 * in the macro definition. The tls/ptp/qos groups are static (used
 * only by this file's group table below).
 */
2017 MLX5E_DEFINE_STATS_GRP(sw, 0);
2018 MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
2019 MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
2020 MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
2021 MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
2022 MLX5E_DEFINE_STATS_GRP(2863, 0);
2023 MLX5E_DEFINE_STATS_GRP(2819, 0);
2024 MLX5E_DEFINE_STATS_GRP(phy, 0);
2025 MLX5E_DEFINE_STATS_GRP(pcie, 0);
2026 MLX5E_DEFINE_STATS_GRP(per_prio, 0);
2027 MLX5E_DEFINE_STATS_GRP(pme, 0);
2028 MLX5E_DEFINE_STATS_GRP(channels, 0);
2029 MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
2030 MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
2031 static MLX5E_DEFINE_STATS_GRP(tls, 0);
2032 static MLX5E_DEFINE_STATS_GRP(ptp, 0);
2033 static MLX5E_DEFINE_STATS_GRP(qos, 0);
/* Table of all NIC-profile stats groups, consumed by the generic
 * iteration helpers (see mlx5e_stats_total_num / the profile's
 * stats_grps hooks). IPSEC groups are compiled in conditionally.
 * NOTE(review): the "#endif" closing the CONFIG_MLX5_EN_IPSEC block
 * and the array's closing "};" are not visible in this numbered
 * listing — dropped by the extraction.
 */
2035 /* The stats groups order is opposite to the update_stats() order calls */
2036 mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
2037 &MLX5E_STATS_GRP(sw),
2038 &MLX5E_STATS_GRP(qcnt),
2039 &MLX5E_STATS_GRP(vnic_env),
2040 &MLX5E_STATS_GRP(vport),
2041 &MLX5E_STATS_GRP(802_3),
2042 &MLX5E_STATS_GRP(2863),
2043 &MLX5E_STATS_GRP(2819),
2044 &MLX5E_STATS_GRP(phy),
2045 &MLX5E_STATS_GRP(eth_ext),
2046 &MLX5E_STATS_GRP(pcie),
2047 &MLX5E_STATS_GRP(per_prio),
2048 &MLX5E_STATS_GRP(pme),
2049 #ifdef CONFIG_MLX5_EN_IPSEC
2050 &MLX5E_STATS_GRP(ipsec_sw),
2051 &MLX5E_STATS_GRP(ipsec_hw),
2053 &MLX5E_STATS_GRP(tls),
2054 &MLX5E_STATS_GRP(channels),
2055 &MLX5E_STATS_GRP(per_port_buff_congest),
2056 &MLX5E_STATS_GRP(ptp),
2057 &MLX5E_STATS_GRP(qos),
2060 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
2062 return ARRAY_SIZE(mlx5e_nic_stats_grps);