/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/tls.h"
#include "en_accel/en_accel.h"

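/* Ethtool statistics are organized as an array of stats groups: each group
 * provides callbacks that report how many counters it owns, emit the counter
 * names, copy the counter values and, optionally, refresh them from the
 * device.  The generic helpers below just walk the group array registered by
 * the active profile.
 */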
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
        return !priv->profile->stats_grps_num ? 0 :
                priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
        mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
        const unsigned int num_stats_grps = stats_grps_num(priv);
        unsigned int total = 0;
        int i;

        for (i = 0; i < num_stats_grps; i++)
                total += stats_grps[i]->get_num_stats(priv);

        return total;
}

void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
        mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
        const unsigned int num_stats_grps = stats_grps_num(priv);
        int i;

        for (i = num_stats_grps - 1; i >= 0; i--)
                if (stats_grps[i]->update_stats &&
                    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
                        stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
        mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
        const unsigned int num_stats_grps = stats_grps_num(priv);
        int i;

        for (i = num_stats_grps - 1; i >= 0; i--)
                if (stats_grps[i]->update_stats)
                        stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
        mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
        const unsigned int num_stats_grps = stats_grps_num(priv);
        int i;

        for (i = 0; i < num_stats_grps; i++)
                idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
        mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
        const unsigned int num_stats_grps = stats_grps_num(priv);
        int i, idx = 0;

        for (i = 0; i < num_stats_grps; i++)
                idx = stats_grps[i]->fill_strings(priv, data, idx);
}

/* Concrete NIC Stats */

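/* Each counter_desc pairs an ethtool string with a field offset;
 * MLX5E_DECLARE_STAT() derives both from a struct member name, so the table
 * below stays in sync with struct mlx5e_sw_stats.
 */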
static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS                 ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
        return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
        int i;

        for (i = 0; i < NUM_SW_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
        int i;

        for (i = 0; i < NUM_SW_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
        return idx;
}

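/* The software counters above are device-wide accumulators.  The helpers
 * below fold one per-queue counter block (RQ, SQ, XDP SQ, XSK queues or
 * channel) into struct mlx5e_sw_stats.
 */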
static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
                                                    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
        s->tx_xdp_xmit += xdpsq_red_stats->xmit;
        s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
        s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
        s->tx_xdp_nops += xdpsq_red_stats->nops;
        s->tx_xdp_full += xdpsq_red_stats->full;
        s->tx_xdp_err += xdpsq_red_stats->err;
        s->tx_xdp_cqes += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
                                                  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
        s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
        s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
        s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
        s->rx_xdp_tx_nops += xdpsq_stats->nops;
        s->rx_xdp_tx_full += xdpsq_stats->full;
        s->rx_xdp_tx_err += xdpsq_stats->err;
        s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
                                                  struct mlx5e_xdpsq_stats *xsksq_stats)
{
        s->tx_xsk_xmit += xsksq_stats->xmit;
        s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
        s->tx_xsk_inlnw += xsksq_stats->inlnw;
        s->tx_xsk_full += xsksq_stats->full;
        s->tx_xsk_err += xsksq_stats->err;
        s->tx_xsk_cqes += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
                                                  struct mlx5e_rq_stats *xskrq_stats)
{
        s->rx_xsk_packets += xskrq_stats->packets;
        s->rx_xsk_bytes += xskrq_stats->bytes;
        s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
        s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
        s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
        s->rx_xsk_csum_none += xskrq_stats->csum_none;
        s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
        s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
        s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
        s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
        s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
        s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
        s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
        s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
        s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
        s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
        s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
        s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
        s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
                                                     struct mlx5e_rq_stats *rq_stats)
{
        s->rx_packets += rq_stats->packets;
        s->rx_bytes += rq_stats->bytes;
        s->rx_lro_packets += rq_stats->lro_packets;
        s->rx_lro_bytes += rq_stats->lro_bytes;
        s->rx_ecn_mark += rq_stats->ecn_mark;
        s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
        s->rx_csum_none += rq_stats->csum_none;
        s->rx_csum_complete += rq_stats->csum_complete;
        s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
        s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
        s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
        s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
        s->rx_xdp_drop += rq_stats->xdp_drop;
        s->rx_xdp_redirect += rq_stats->xdp_redirect;
        s->rx_wqe_err += rq_stats->wqe_err;
        s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
        s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
        s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
        s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
        s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
        s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
        s->rx_cache_reuse += rq_stats->cache_reuse;
        s->rx_cache_full += rq_stats->cache_full;
        s->rx_cache_empty += rq_stats->cache_empty;
        s->rx_cache_busy += rq_stats->cache_busy;
        s->rx_cache_waive += rq_stats->cache_waive;
        s->rx_congst_umr += rq_stats->congst_umr;
        s->rx_arfs_err += rq_stats->arfs_err;
        s->rx_recover += rq_stats->recover;
#ifdef CONFIG_MLX5_EN_TLS
        s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
        s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
        s->rx_tls_ctx += rq_stats->tls_ctx;
        s->rx_tls_del += rq_stats->tls_del;
        s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
        s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
        s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
        s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
        s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
        s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
        s->rx_tls_err += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
                                                     struct mlx5e_ch_stats *ch_stats)
{
        s->ch_events += ch_stats->events;
        s->ch_poll += ch_stats->poll;
        s->ch_arm += ch_stats->arm;
        s->ch_aff_change += ch_stats->aff_change;
        s->ch_force_irq += ch_stats->force_irq;
        s->ch_eq_rearm += ch_stats->eq_rearm;
}

static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
                                               struct mlx5e_sq_stats *sq_stats)
{
        s->tx_packets += sq_stats->packets;
        s->tx_bytes += sq_stats->bytes;
        s->tx_tso_packets += sq_stats->tso_packets;
        s->tx_tso_bytes += sq_stats->tso_bytes;
        s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
        s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
        s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
        s->tx_nop += sq_stats->nop;
        s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
        s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
        s->tx_queue_stopped += sq_stats->stopped;
        s->tx_queue_wake += sq_stats->wake;
        s->tx_queue_dropped += sq_stats->dropped;
        s->tx_cqe_err += sq_stats->cqe_err;
        s->tx_recover += sq_stats->recover;
        s->tx_xmit_more += sq_stats->xmit_more;
        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
        s->tx_csum_none += sq_stats->csum_none;
        s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
        s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
        s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
        s->tx_tls_ctx += sq_stats->tls_ctx;
        s->tx_tls_ooo += sq_stats->tls_ooo;
        s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
        s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
        s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
        s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
        s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
        s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
        s->tx_cqes += sq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
                                                struct mlx5e_sw_stats *s)
{
        int i;

        if (!priv->port_ptp_opened)
                return;

        mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch);

        for (i = 0; i < priv->max_opened_tc; i++) {
                mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]);

                /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
                barrier();
        }
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        int i;

        memset(s, 0, sizeof(*s));

        for (i = 0; i < priv->max_nch; i++) {
                struct mlx5e_channel_stats *channel_stats =
                        &priv->channel_stats[i];
                int j;

                mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
                mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
                mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);

                mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
                /* AF_XDP zero-copy */
                mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
                mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

                for (j = 0; j < priv->max_opened_tc; j++) {
                        mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

                        /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
                        barrier();
                }
        }
        mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
}

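/* Q counters: firmware counters read with the QUERY_Q_COUNTER command.
 * rx_out_of_buffer counts packets dropped because no receive WQE was posted;
 * rx_if_down_packets comes from the drop RQ counter set and counts packets
 * that arrived while the interface was down.
 */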
static const struct counter_desc q_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS                  ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS            ARRAY_SIZE(drop_rq_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
        int num_stats = 0;

        if (priv->q_counter)
                num_stats += NUM_Q_COUNTERS;

        if (priv->drop_rq_q_counter)
                num_stats += NUM_DROP_RQ_COUNTERS;

        return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
        int i;

        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       q_stats_desc[i].format);

        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       drop_rq_stats_desc[i].format);

        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
        int i;

        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
                                                   q_stats_desc, i);
        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
                                                   drop_rq_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
        struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
        u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
        u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
        int ret;

        MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

        if (priv->q_counter) {
                MLX5_SET(query_q_counter_in, in, counter_set_id,
                         priv->q_counter);
                ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
                if (!ret)
                        qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
                                                          out, out_of_buffer);
        }

        if (priv->drop_rq_q_counter) {
                MLX5_SET(query_q_counter_in, in, counter_set_id,
                         priv->drop_rq_q_counter);
                ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
                if (!ret)
                        qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
                                                            out, out_of_buffer);
        }
}

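/* VNIC environment counters, read with the QUERY_VNIC_ENV command.  Each
 * counter is only exposed when the corresponding capability bit is set,
 * hence the MLX5_CAP_GEN() gates in the NUM_* macros below.
 */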
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
        { "rx_steer_missed_packets",
          VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
        { "dev_internal_queue_oob",
          VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
        (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
         ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
        (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
         ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
        return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
               NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
        int i;

        for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       vnic_env_stats_steer_desc[i].format);

        for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       vnic_env_stats_dev_oob_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
        int i;

        for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
                                                  vnic_env_stats_steer_desc, i);

        for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
                data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
                                                  vnic_env_stats_dev_oob_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
        u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
        u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
        struct mlx5_core_dev *mdev = priv->mdev;

        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
                return;

        MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
        mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

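/* Per-vport traffic counters, read with the QUERY_VPORT_COUNTER command;
 * every field is a 64-bit big-endian value in query_vport_counter_out.
 */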
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
        { "rx_vport_unicast_packets",
          VPORT_COUNTER_OFF(received_eth_unicast.packets) },
        { "rx_vport_unicast_bytes",
          VPORT_COUNTER_OFF(received_eth_unicast.octets) },
        { "tx_vport_unicast_packets",
          VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
        { "tx_vport_unicast_bytes",
          VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
        { "rx_vport_multicast_packets",
          VPORT_COUNTER_OFF(received_eth_multicast.packets) },
        { "rx_vport_multicast_bytes",
          VPORT_COUNTER_OFF(received_eth_multicast.octets) },
        { "tx_vport_multicast_packets",
          VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
        { "tx_vport_multicast_bytes",
          VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
        { "rx_vport_broadcast_packets",
          VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
        { "rx_vport_broadcast_bytes",
          VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
        { "tx_vport_broadcast_packets",
          VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
        { "tx_vport_broadcast_bytes",
          VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
        { "rx_vport_rdma_unicast_packets",
          VPORT_COUNTER_OFF(received_ib_unicast.packets) },
        { "rx_vport_rdma_unicast_bytes",
          VPORT_COUNTER_OFF(received_ib_unicast.octets) },
        { "tx_vport_rdma_unicast_packets",
          VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
        { "tx_vport_rdma_unicast_bytes",
          VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
        { "rx_vport_rdma_multicast_packets",
          VPORT_COUNTER_OFF(received_ib_multicast.packets) },
        { "rx_vport_rdma_multicast_bytes",
          VPORT_COUNTER_OFF(received_ib_multicast.octets) },
        { "tx_vport_rdma_multicast_packets",
          VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
        { "tx_vport_rdma_multicast_bytes",
          VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS              ARRAY_SIZE(vport_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
        return NUM_VPORT_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
        int i;

        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
        int i;

        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
                                                  vport_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
        u32 *out = (u32 *)priv->stats.vport.query_vport_out;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
        struct mlx5_core_dev *mdev = priv->mdev;

        MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

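/* The port counter groups below are all read through the PPCNT access
 * register: the update callback selects the counter group (and, where
 * relevant, port or priority) in the "in" mailbox and copies the raw
 * register image into priv->stats.pport.  The 64-bit counters are laid out
 * as <name>_high/<name>_low pairs, so the descriptor offsets point at the
 * _high word and MLX5E_READ_CTR64_BE() reads the whole big-endian value from
 * there.
 */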
#define PPORT_802_3_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
        { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
        { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
        { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
        { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
        { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
        { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
        { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
        { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
        { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
        { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
        { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
        { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
        { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
        { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
        { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
        { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
        { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
        { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS        ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
        return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
        int i;

        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
        int i;

        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
                                                  pport_802_3_stats_desc, i);
        return idx;
}

#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
        (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->IEEE_802_3_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

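/* Standard ethtool pause statistics.  The IEEE 802.3 PPCNT group is read
 * into a local buffer on demand and only the two pause-frame counters are
 * extracted (this is what backs the driver's .get_pause_stats callback).
 */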
#define MLX5E_READ_CTR64_BE_F(ptr, c)                   \
        be64_to_cpu(*(__be64 *)((char *)ptr +           \
                MLX5_BYTE_OFF(ppcnt_reg,                \
                              counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)))

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
                           struct ethtool_pause_stats *pause_stats)
{
        u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
                             sz, MLX5_REG_PPCNT, 0, 0);

        pause_stats->tx_pause_frames =
                MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
                                      a_pause_mac_ctrl_frames_transmitted);
        pause_stats->rx_pause_frames =
                MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
                                      a_pause_mac_ctrl_frames_received);
}

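/* RFC 2863 (interface) and RFC 2819 (RMON) counter groups, also taken from
 * PPCNT.  Note that the RFC 2863 group is queried unconditionally, while the
 * groups wrapped in MLX5_BASIC_PPCNT_SUPPORTED() checks are skipped when the
 * PCAM reports no PPCNT support.
 */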
#define PPORT_2863_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
        { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
        { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
        { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS        ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
        return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
        int i;

        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
        int i;

        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
                                                  pport_2863_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->RFC_2863_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
        { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
        { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
        { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
        { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
        { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
        { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
        { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
        { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
        { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
        { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
        { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
        { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
        { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS        ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
        return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
        int i;

        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
        int i;

        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
                                                  pport_2819_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->RFC_2819_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

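/* Physical layer counters.  The group mixes one special counter
 * (link_down_events, a plain 32-bit field in the PPCNT layout) with the
 * optional statistical group (PCS symbol errors, corrected bits), which is
 * only present when the PCAM ppcnt_statistical_group feature bit is set.
 */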
#define PPORT_PHY_STATISTICAL_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
        { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
        { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
        { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
        { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
        { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
        { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
        ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
        ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int num_stats;

        /* "1" for link_down_events special counter */
        num_stats = 1;

        num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
                     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

        num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
                     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

        return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int i;

        strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
                return idx;

        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       pport_phy_statistical_stats_desc[i].format);

        if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
                for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pport_phy_statistical_err_lanes_stats_desc[i].format);

        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int i;

        /* link_down_events_phy has special handling since it is not stored in __be64 format */
        data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
                               counter_set.phys_layer_cntrs.link_down_events);

        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
                return idx;

        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
                data[idx++] =
                        MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
                                            pport_phy_statistical_stats_desc, i);

        if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
                for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
                                                    pport_phy_statistical_err_lanes_stats_desc,
                                                    i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->phy_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
                return;

        out = pstats->phy_statistical_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_ETH_EXT_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
        { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS      ARRAY_SIZE(pport_eth_ext_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
                return NUM_PPORT_ETH_EXT_COUNTERS;

        return 0;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
        int i;

        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pport_eth_ext_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
        int i;

        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
                                                    pport_eth_ext_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->eth_ext_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

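/* PCIe performance counters come from the MPCNT access register rather than
 * PPCNT, and each subset is gated on its own MCAM feature bit.
 */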
#define PCIE_PERF_OFF(c) \
        MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
        { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
        { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
        MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
        { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
        { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
        { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
        { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
        { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS          ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64        ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS    ARRAY_SIZE(pcie_perf_stall_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
        int num_stats = 0;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
                num_stats += NUM_PCIE_PERF_COUNTERS;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
                num_stats += NUM_PCIE_PERF_COUNTERS64;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
                num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

        return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
        int i;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pcie_perf_stats_desc[i].format);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pcie_perf_stats_desc64[i].format);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pcie_perf_stall_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
        int i;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
                                                    pcie_perf_stats_desc, i);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
                                                    pcie_perf_stats_desc64, i);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
                                                    pcie_perf_stall_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
        struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
        void *out;

        if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
                return;

        out = pcie_stats->pcie_perf_counters;
        MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

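/* Per traffic-class and per traffic-class congestion counters.  These PPCNT
 * groups are read once per priority (via the prio_tc field) and are only
 * exposed when the device reports the sbcam_reg capability.
 */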
#define PPORT_PER_TC_PRIO_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
        { "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS  ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
        { "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
        { "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
        ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return 0;

        return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int i, prio;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return idx;

        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_tc_prio_stats_desc[i].format, prio);
                for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_tc_congest_prio_stats_desc[i].format, prio);
        }

        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
        struct mlx5e_pport_stats *pport = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        int i, prio;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return idx;

        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
                                                    pport_per_tc_prio_stats_desc, i);
                for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
                                                    pport_per_tc_congest_prio_stats_desc, i);
        }

        return idx;
}

static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;
        int prio;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return;

        MLX5_SET(ppcnt_reg, in, pnat, 2);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_tc_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
        }
}

static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return 0;

        return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;
        int prio;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return;

        MLX5_SET(ppcnt_reg, in, pnat, 2);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_tc_congest_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
        }
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
        return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
               mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
        mlx5e_grp_per_tc_prio_update_stats(priv);
        mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}

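/* Per-priority traffic and PFC counters.  PFC counters are emitted per
 * priority for each priority on which PFC is enabled, and once as "global"
 * pause counters (taken from priority 0 of the per-prio PPCNT image) when
 * global pause is enabled.
 */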
#define PPORT_PER_PRIO_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
        { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
        { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
        { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
        { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
        { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS     ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
        return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
                                                   u8 *data,
                                                   int idx)
{
        int i, prio;

        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_prio_traffic_stats_desc[i].format, prio);
        }

        return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
                                                 u64 *data,
                                                 int idx)
{
        int i, prio;

        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
                                                    pport_per_prio_traffic_stats_desc, i);
        }

        return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
        /* %s is "global" or "prio{i}" */
        { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
        { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
        { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
        { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
        { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
        { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
        { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS         ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)      (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
                                                 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
                                                 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))

static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 pfc_en_tx;
        u8 pfc_en_rx;
        int err;

        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return 0;

        err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

        return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 rx_pause;
        u32 tx_pause;
        int err;

        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;

        err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

        return err ? false : rx_pause | tx_pause;
}

static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
        return (mlx5e_query_global_pause_combined(priv) +
                hweight8(mlx5e_query_pfc_combined(priv))) *
                NUM_PPORT_PER_PRIO_PFC_COUNTERS +
                NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
                                               u8 *data,
                                               int idx)
{
        unsigned long pfc_combined;
        int i, prio;

        pfc_combined = mlx5e_query_pfc_combined(priv);
        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        char pfc_string[ETH_GSTRING_LEN];

                        snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_prio_pfc_stats_desc[i].format, pfc_string);
                }
        }

        if (mlx5e_query_global_pause_combined(priv)) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_prio_pfc_stats_desc[i].format, "global");
                }
        }

        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       pport_pfc_stall_stats_desc[i].format);

        return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
                                             u64 *data,
                                             int idx)
{
        unsigned long pfc_combined;
        int i, prio;

        pfc_combined = mlx5e_query_pfc_combined(priv);
        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
                                                    pport_per_prio_pfc_stats_desc, i);
                }
        }

        if (mlx5e_query_global_pause_combined(priv)) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
                                                    pport_per_prio_pfc_stats_desc, i);
                }
        }

        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
                                                  pport_pfc_stall_stats_desc, i);

        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
        return mlx5e_grp_per_prio_traffic_get_num_stats() +
               mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
        idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
        idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
        idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
        idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        int prio;
        void *out;

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz,
                                     MLX5_REG_PPCNT, 0, 0);
        }
}

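/* Port module event (PME) counters are maintained in software by the core
 * driver and fetched with mlx5_get_pme_stats(); there is nothing to refresh
 * from hardware here, hence the empty update callback.
 */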
static const struct counter_desc mlx5e_pme_status_desc[] = {
        { "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
        { "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
        { "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
        { "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS            ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS               ARRAY_SIZE(mlx5e_pme_error_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
        return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
        int i;

        for (i = 0; i < NUM_PME_STATUS_STATS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

        for (i = 0; i < NUM_PME_ERR_STATS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
        struct mlx5_pme_stats pme_stats;
        int i;

        mlx5_get_pme_stats(priv->mdev, &pme_stats);

        for (i = 0; i < NUM_PME_STATUS_STATS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
                                                   mlx5e_pme_status_desc, i);

        for (i = 0; i < NUM_PME_ERR_STATS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
                                                   mlx5e_pme_error_desc, i);

        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }

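/* TLS offload counters are owned by en_accel/tls; this group simply forwards
 * the ethtool callbacks to the TLS code.
 */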
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
        return mlx5e_tls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
        return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
        return idx + mlx5e_tls_get_stats(priv, data + idx);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }

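/* Per-ring descriptors.  MLX5E_DECLARE_RX_STAT(), MLX5E_DECLARE_TX_STAT()
 * and friends embed a "%d" in the string (e.g. "rx%d_packets"), so the names
 * are formatted with the ring index when the per-channel counters are
 * emitted.
 */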
static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
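
/* Two tables describe the same struct mlx5e_xdpsq_stats: rq_xdpsq_stats_desc
 * covers the XDP_TX queue embedded in each RQ, while xdpsq_stats_desc covers
 * the XDP_REDIRECT target queue; they differ only in the ethtool name prefix
 * declared by the wrapping macro.
 */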
static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};
static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};
static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
static const struct counter_desc ptp_sq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};
static const struct counter_desc ptp_ch_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};
static const struct counter_desc ptp_cq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};
#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
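
/* PTP counters are only exposed once the dedicated PTP port channel has been
 * opened; until then the group reports zero statistics.
 */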
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
	return priv->port_ptp_opened ?
	       NUM_PTP_CH_STATS +
	       ((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) :
	       0;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
	int i, tc;

	if (!priv->port_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		sprintf(data + (idx++) * ETH_GSTRING_LEN,
			ptp_ch_stats_desc[i].format);

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < NUM_PTP_SQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ptp_sq_stats_desc[i].format, tc);

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < NUM_PTP_CQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ptp_cq_stats_desc[i].format, tc);

	return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
	int i, tc;

	if (!priv->port_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch,
					     ptp_ch_stats_desc, i);

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < NUM_PTP_SQ_STATS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc],
						     ptp_sq_stats_desc, i);

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < NUM_PTP_CQ_STATS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc],
						     ptp_cq_stats_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
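
/* The channels group flattens every per-channel counter into one run: CH
 * stats first, then per-channel RQ/XSK-RQ/RQ-XDPSQ, then per-TC SQ, and
 * finally XSK-SQ and XDPSQ; fill_strings and fill_stats must keep the same
 * order so names and values line up in ethtool -S.
 */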
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
	int max_nch = priv->max_nch;

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					i + tc * max_nch);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
						     rq_stats_desc, j);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
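
/* Group definitions; the second argument is the update_stats_mask, and groups
 * flagged with MLX5E_NDO_UPDATE_STATS are the ones whose update_stats() is
 * also run when the netdev (ndo) statistics are refreshed.
 */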
MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
static MLX5E_DEFINE_STATS_GRP(ptp, 0);
/* The stats groups order is opposite to the update_stats() call order */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
	&MLX5E_STATS_GRP(ipsec_hw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	&MLX5E_STATS_GRP(ptp),
};
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}
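
/*
 * Wiring sketch (assumed, based on how profiles typically consume this table
 * rather than on code shown here): the NIC profile points its stats hooks at
 * the array and counter above, e.g.
 *
 *	.stats_grps     = mlx5e_nic_stats_grps,
 *	.stats_grps_num = mlx5e_nic_stats_grps_num,
 *
 * and the generic stats walkers then size, name and fill the flat ethtool
 * arrays group by group in this order.
 */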