// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies

#include "en/ptp.h"
#include "en/txrx.h"
#include "en/params.h"

#define MLX5E_PTP_CHANNEL_IX 0
struct mlx5e_ptp_params {
	struct mlx5e_params params;
	struct mlx5e_sq_param txq_sq_param;
};
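/* TX port timestamping produces two completions per packet: the regular
 * data-path CQE carries the CQE timestamp, and the dedicated timestamp CQ
 * (ts_cq) later delivers the port timestamp. Both values are staged in
 * skb->cb via the struct below; the second arrival triggers the report to
 * the stack. See mlx5e_skb_cb_hwtstamp_handler().
 */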
struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;
	ktime_t port_hwtstamp;
};
void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
{
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
	return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
}
static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
				     struct mlx5e_ptp_cq_stats *cq_stats)
{
	struct skb_shared_hwtstamps hwts = {};
	ktime_t diff;

	diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
		   mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);

	/* Maximal allowed diff is 1 / 128 second */
	if (diff > (NSEC_PER_SEC >> 7)) {
		cq_stats->abort++;
		cq_stats->abort_abs_diff_ns += diff;
		return;
	}

	hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
	skb_tstamp_tx(skb, &hwts);
}
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
				   ktime_t hwtstamp,
				   struct mlx5e_ptp_cq_stats *cq_stats)
{
	switch (hwtstamp_type) {
	case (MLX5E_SKB_CB_CQE_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp;
		break;
	case (MLX5E_SKB_CB_PORT_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp;
		break;
	}

	/* Once both timestamps have arrived, report the port timestamp and
	 * clear the skb cb, as the skb is about to be released.
	 */
	if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp ||
	    !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
		return;

	mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
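/* A minimal sketch of the calling convention; the CQE-side caller lives in
 * the data path outside this file, so its exact form here is an assumption:
 *
 *	mlx5e_skb_cb_hwtstamp_init(skb);		// at transmit time
 *	...
 *	// data-path completion:
 *	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
 *				      cqe_hwtstamp, cq_stats);
 *	// timestamp CQ completion (mlx5e_ptp_handle_ts_cqe() below):
 *	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
 *				      port_hwtstamp, cq_stats);
 *
 * Whichever call arrives second performs the actual skb_tstamp_tx().
 */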
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    int budget)
{
	struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	ktime_t hwtstamp;

	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
		ptpsq->cq_stats->err_cqe++;
		goto out;
	}

	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, ptpsq->cq_stats);
	ptpsq->cq_stats->cqe++;

out:
	napi_consume_skb(skb, budget);
}
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe)
		return false;

	do {
		mlx5_cqwq_pop(cqwq);
		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done == budget;
}
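/* Returning true here means the budget was exhausted; the caller,
 * mlx5e_ptp_napi_poll() below, folds this into its busy flag and keeps
 * NAPI scheduled, matching the other mlx5e CQ pollers.
 */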
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	for (i = 0; i < c->num_tc; i++) {
		busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
		busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
	}

	if (busy) {
		work_done = budget;
		goto out;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	for (i = 0; i < c->num_tc; i++) {
		mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
		mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
	}

out:
	rcu_read_unlock();

	return work_done;
}
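/* Standard NAPI re-arm pattern: the CQs above are re-armed only after
 * napi_complete_done() succeeds. If it fails, NAPI was rescheduled in the
 * meantime and the next poll picks up the pending CQEs instead.
 */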
static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param,
				 struct mlx5e_txqsq *sq, int tc,
				 struct mlx5e_ptpsq *ptpsq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int node;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->clock     = &mdev->clock;
	sq->mkey_be   = c->mkey_be;
	sq->netdev    = c->netdev;
	sq->priv      = c->priv;
	sq->mdev      = mdev;
	sq->ch_ix     = MLX5E_PTP_CHANNEL_IX;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats     = &c->priv->ptp_stats.sq[tc];
	sq->ptpsq     = ptpsq;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	sq->stop_room = param->stop_room;
	sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);

	node = dev_to_node(mlx5_core_dma_dev(mdev));

	param->wq.db_numa_node = node;
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, node);
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}
static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);

	ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
					     GFP_KERNEL, numa);
	if (!ptpsq->skb_fifo.fifo)
		return -ENOMEM;

	ptpsq->skb_fifo.pc   = &ptpsq->skb_fifo_pc;
	ptpsq->skb_fifo.cc   = &ptpsq->skb_fifo_cc;
	ptpsq->skb_fifo.mask = wq_sz - 1;

	return 0;
}
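/* The mask above relies on wq_sz being a power of two, which holds for
 * mlx5 cyclic work queues (sized from log_sq_size in
 * mlx5e_ptp_build_sq_param() below); producer/consumer indices then wrap
 * cheaply via "idx & mask".
 */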
static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
{
	while (*skb_fifo->pc != *skb_fifo->cc) {
		struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);

		dev_kfree_skb_any(skb);
	}
}
static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
{
	mlx5e_ptp_drain_skb_fifo(skb_fifo);
	kvfree(skb_fifo->fifo);
}
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
				int txq_ix, struct mlx5e_ptp_params *cparams,
				int tc, struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
				    txqsq, tc, ptpsq);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = txqsq->cq.mcq.cqn;
	csp.wq_ctrl         = &txqsq->wq_ctrl;
	csp.min_inline_mode = txqsq->min_inline_mode;
	csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;

	err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
	if (err)
		goto err_free_txqsq;

	err = mlx5e_ptp_alloc_traffic_db(ptpsq,
					 dev_to_node(mlx5_core_dma_dev(c->mdev)));
	if (err)
		goto err_free_txqsq;

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(txqsq);

	return err;
}
static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
	cancel_work_sync(&sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	int ix_base;
	int err;
	int tc;

	/* PTP TXQs are indexed after all regular channel TXQs */
	ix_base = params->num_tc * params->num_channels;

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = ix_base + tc;

		err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
					   cparams, tc, &c->ptpsq[tc]);
		if (err)
			goto close_txqsq;
	}

	return 0;

close_txqsq:
	for (--tc; tc >= 0; tc--)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);

	return err;
}
static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
}
static int mlx5e_ptp_open_cqs(struct mlx5e_ptp *c,
			      struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	int err;
	int tc;

	ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi     = &c->napi;
	ccp.ix       = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->txq_sq_param.cqp;

	for (tc = 0; tc < params->num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	for (tc = 0; tc < params->num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
	tc = params->num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);

	return err;
}
static void mlx5e_ptp_close_cqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
}
static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq;

	mlx5e_build_sq_param_common(mdev, param);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
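/* The stop_room above reserves space for one maximal WQE
 * (MLX5_SEND_WQE_MAX_WQEBBS): the xmit path stops the queue while the
 * largest possible WQE still fits, rather than failing mid-post.
 */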
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	params->tx_min_inline_mode = orig->tx_min_inline_mode;
	params->num_channels = orig->num_channels;
	params->hard_mtu = orig->hard_mtu;
	params->sw_mtu = orig->sw_mtu;
	params->num_tc = orig->num_tc;

	/* SQ */
	params->log_sq_size = orig->log_sq_size;

	mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
}
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	err = mlx5e_ptp_open_cqs(c, cparams);
	if (err)
		return err;

	err = mlx5e_ptp_open_txqsqs(c, cparams);
	if (err)
		goto close_cqs;

	return 0;

close_cqs:
	mlx5e_ptp_close_cqs(c);

	return err;
}
static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
	mlx5e_ptp_close_txqsqs(c);
	mlx5e_ptp_close_cqs(c);
}
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   u8 lag_port, struct mlx5e_ptp **cp)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ptp_params *cparams;
	struct mlx5e_ptp *c;
	int err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
	if (!c || !cparams) {
		/* Free both: one allocation may have succeeded */
		err = -ENOMEM;
		goto err_free;
	}

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->pdev     = mlx5_core_dma_dev(priv->mdev);
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
	c->num_tc   = params->num_tc;
	c->stats    = &priv->ptp_stats.ch;
	c->lag_port = lag_port;

	netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);

	mlx5e_ptp_build_params(c, cparams, params);

	err = mlx5e_ptp_open_queues(c, cparams);
	if (unlikely(err))
		goto err_napi_del;

	*cp = c;
	kvfree(cparams);

	return 0;

err_napi_del:
	netif_napi_del(&c->napi);
err_free:
	kvfree(cparams);
	kvfree(c);

	return err;
}
void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
	mlx5e_ptp_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}
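/* Expected lifecycle, as suggested by the API in this file (the actual
 * caller lives in the channels management code elsewhere in the driver):
 *
 *	struct mlx5e_ptp *c;
 *
 *	err = mlx5e_ptp_open(priv, params, lag_port, &c);
 *	if (err)
 *		return err;
 *	mlx5e_ptp_activate_channel(c);
 *	...
 *	mlx5e_ptp_deactivate_channel(c);
 *	mlx5e_ptp_close(c);
 */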
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
	int tc;

	napi_enable(&c->napi);

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
}
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);

	napi_disable(&c->napi);
}
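/* Deactivation order is the reverse of mlx5e_ptp_activate_channel(): the
 * SQs stop accepting traffic first, and only then is NAPI disabled.
 */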