struct mlx5e_cq_param icosq_cq;
};
-static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
- return MLX5_CAP_GEN(mdev, striding_rq) &&
+ bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
MLX5_CAP_ETH(mdev, reg_umr_sq);
+ u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
+ bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
+
+ if (!striding_rq_umr)
+ return false;
+ if (!inline_umr) {
+ mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
+ (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
+ return false;
+ }
+ return true;
+}
+
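+/* Byte size of a single RX fragment when the SKB can be built linearly:
+ * headroom plus the HW MTU, rounded up so the tail also fits the
+ * skb_shared_info (MLX5_SKB_FRAG_SZ). With XDP attached, each packet
+ * must occupy a full page.
+ */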
+static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params)
+{
+ if (!params->xdp_prog) {
+ u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;
+
+ return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
+ }
+
+ return PAGE_SIZE;
+}
+
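+/* log2 of the number of packets that fit in one MPWQE. For example, with
+ * a 1500B MTU the linear fragment typically rounds up to 2KB (order 11),
+ * so a 256KB WQE (MLX5_MPWRQ_LOG_WQE_SZ == 18) carries 2^(18 - 11) = 128
+ * packets.
+ */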
+static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
+{
+ u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
+
+ return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
+}
+
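+/* A received MPWQE packet can be wrapped in a linear SKB only when LRO is
+ * off and a whole fragment fits in one page. Without the
+ * ext_stride_num_range capability, the number of strides per WQE is also
+ * bounded from below: log_wqe_num_of_strides is reported to HW biased by
+ * MLX5_MPWQE_LOG_NUM_STRIDES_BASE, so the biased value must not be negative.
+ */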
+static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
+ s8 signed_log_num_strides_param;
+ u8 log_num_strides;
+
+ if (params->lro_en || frag_sz > PAGE_SIZE)
+ return false;
+
+ if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
+ return true;
+
+ log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
+ signed_log_num_strides_param =
+ (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
+
+ return signed_log_num_strides_param >= 0;
+}
+
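+/* Convert the RQ size from MTU-frame units (log_rq_mtu_frames) to MPWQE
+ * units, clamped to the minimum number of WQEs the RQ must hold.
+ */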
+static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
+{
+ if (params->log_rq_mtu_frames <
+ mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+
+ return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
+}
+
+static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+ return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params));
+
+ return MLX5E_MPWQE_STRIDE_SZ(mdev,
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
+}
+
+static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return MLX5_MPWRQ_LOG_WQE_SZ -
+ mlx5e_mpwqe_get_log_stride_size(mdev, params);
+}
+
+static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ u16 linear_rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+
+ linear_rq_headroom += NET_IP_ALIGN;
+
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)
+ return linear_rq_headroom;
+
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+ return linear_rq_headroom;
+
+ return 0;
}
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params, u8 rq_type)
+ struct mlx5e_params *params)
{
- params->rq_wq_type = rq_type;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ params->log_rq_mtu_frames = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- params->log_rq_size = is_kdump_kernel() ?
- MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
- params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
- params->mpwqe_log_stride_sz;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- params->log_rq_size = is_kdump_kernel() ?
- MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
- params->rq_headroom = params->xdp_prog ?
- XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
- params->rq_headroom += NET_IP_ALIGN;
-
/* Extra room needed for build_skb */
- params->lro_wqe_sz -= params->rq_headroom +
+ params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- BIT(params->log_rq_size),
- BIT(params->mpwqe_log_stride_sz),
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
+ BIT(params->log_rq_mtu_frames),
+ BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
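+/* Striding RQ requires the UMR capabilities checked above, no IPSec
+ * offload (supported only on the legacy RQ), and, when XDP is attached,
+ * linear MPWQE fragments so the program sees a contiguous packet.
+ */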
+bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
- u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
- !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
- MLX5_WQ_TYPE_LINKED_LIST;
- mlx5e_init_rq_type_params(mdev, params, rq_type);
+ return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
+ !MLX5_IPSEC_DEV(mdev) &&
+ !(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
+}
+
+void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+ params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_LINKED_LIST;
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
mutex_unlock(&priv->state_lock);
}
-static void mlx5e_tx_timeout_work(struct work_struct *work)
-{
- struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
- tx_timeout_work);
- int err;
-
- rtnl_lock();
- mutex_lock(&priv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
- mlx5e_close_locked(priv->netdev);
- err = mlx5e_open_locked(priv->netdev);
- if (err)
- netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
- err);
-unlock:
- mutex_unlock(&priv->state_lock);
- rtnl_unlock();
-}
-
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
int i;
synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
}
-static inline int mlx5e_get_wqe_mtt_sz(void)
-{
- /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
- * To avoid copying garbage after the mtt array, we allocate
- * a little more.
- */
- return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
- MLX5_UMR_MTT_ALIGNMENT);
-}
-
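+/* The UMR WQE now carries its MTT translations inline (MLX5_UMR_INLINE),
+ * so no data segment pointing at a DMA-mapped MTT array is needed and a
+ * single WQE template can serve the whole RQ.
+ */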
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
- struct mlx5e_umr_wqe *wqe,
- u16 ix)
+ struct mlx5e_umr_wqe *wqe)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
- struct mlx5_wqe_data_seg *dseg = &wqe->data;
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
- u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
- u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
+ u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
cseg->imm = rq->mkey_be;
- ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+ ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
ucseg->xlt_octowords =
cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
- ucseg->bsf_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-
- dseg->lkey = sq->mkey_be;
- dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
struct mlx5e_channel *c)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
- int mtt_sz = mlx5e_get_wqe_mtt_sz();
- int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
- int i;
rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
GFP_KERNEL, cpu_to_node(c->cpu));
if (!rq->mpwqe.info)
- goto err_out;
-
- /* We allocate more than mtt_sz as we will align the pointer */
- rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (unlikely(!rq->mpwqe.mtt_no_align))
- goto err_free_wqe_info;
-
- for (i = 0; i < wq_sz; i++) {
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
- wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
- MLX5_UMR_ALIGN);
- wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
- PCI_DMA_TODEVICE);
- if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
- goto err_unmap_mtts;
+ return -ENOMEM;
- mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
- }
+ mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
return 0;
-
-err_unmap_mtts:
- while (--i >= 0) {
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
- dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
- PCI_DMA_TODEVICE);
- }
- kfree(rq->mpwqe.mtt_no_align);
-err_free_wqe_info:
- kfree(rq->mpwqe.info);
-
-err_out:
- return -ENOMEM;
-}
-
-static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
-{
- int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
- int mtt_sz = mlx5e_get_wqe_mtt_sz();
- int i;
-
- for (i = 0; i < wq_sz; i++) {
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
- dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
- PCI_DMA_TODEVICE);
- }
- kfree(rq->mpwqe.mtt_no_align);
- kfree(rq->mpwqe.info);
}
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
u32 *in;
int err;
- if (!MLX5E_VALID_NUM_MTTS(npages))
- return -EINVAL;
-
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
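+/* DMA offset of MPWQE number wqe_ix within the RQ's UMR-mapped region:
+ * each WQE covers an aligned run of 2^MLX5E_LOG_ALIGNED_MPWQE_PPW pages.
+ */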
+static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+{
+ return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
+}
+
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
if (IS_ERR(rq->xdp_prog)) {
goto err_rq_wq_destroy;
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
- rq->buff.headroom = params->rq_headroom;
+ rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
goto err_rq_wq_destroy;
}
- rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
- rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);
+ rq->mpwqe.skb_from_cqe_mpwrq =
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
+ mlx5e_skb_from_cqe_mpwrq_linear :
+ mlx5e_skb_from_cqe_mpwrq_nonlinear;
+ rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
+ rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
byte_count = params->lro_en ?
params->lro_wqe_sz :
- MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
+ MLX5E_SW2HW_MTU(params, params->sw_mtu);
#ifdef CONFIG_MLX5_EN_IPSEC
if (MLX5_IPSEC_DEV(mdev))
byte_count += MLX5E_METADATA_ETHER_LEN;
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;
+ u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
- wqe->data.addr = cpu_to_be64(dma_offset);
+ wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom);
}
wqe->data.byte_count = cpu_to_be32(byte_count);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- mlx5e_rq_free_mpwqe_info(rq);
+ kfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
int next_state)
{
- struct mlx5e_channel *c = rq->channel;
- struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_core_dev *mdev = rq->mdev;
void *in;
void *rqc;
struct mlx5_core_dev *mdev = c->mdev;
int err;
- sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
return 0;
}
+static void mlx5e_sq_recover(struct work_struct *work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix,
struct mlx5e_params *params,
sq->channel = c;
sq->txq_ix = txq_ix;
sq->uar_map = mdev->mlx5e_res.bfreg.map;
- sq->max_inline = params->tx_max_inline;
sq->min_inline_mode = params->tx_min_inline_mode;
+ INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
return err;
}
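+/* After a recovered SQ passes through the RST state its HW counters are
+ * back at zero; bring the SW producer/consumer counters in sync.
+ */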
+static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
+{
+ WARN_ONCE(sq->cc != sq->pc,
+ "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
+ sq->sqn, sq->cc, sq->pc);
+ sq->cc = 0;
+ sq->dma_fifo_cc = 0;
+ sq->pc = 0;
+}
+
static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
{
struct mlx5e_channel *c = sq->channel;
struct mlx5_core_dev *mdev = c->mdev;
+ struct mlx5_rate_limit rl = {0};
mlx5e_destroy_sq(mdev, sq->sqn);
- if (sq->rate_limit)
- mlx5_rl_remove_rate(mdev, sq->rate_limit);
+ if (sq->rate_limit) {
+ rl.rate = sq->rate_limit;
+ mlx5_rl_remove_rate(mdev, &rl);
+ }
mlx5e_free_txqsq_descs(sq);
mlx5e_free_txqsq(sq);
}
+static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
+{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
+
+ while (time_before(jiffies, exp_time)) {
+ if (sq->cc == sq->pc)
+ return 0;
+
+ msleep(20);
+ }
+
+ netdev_err(sq->channel->netdev,
+ "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
+ sq->sqn, sq->cc, sq->pc);
+
+ return -ETIMEDOUT;
+}
+
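+/* A SQ in error state cannot be moved directly to ready; walk it through
+ * the ERR -> RST -> RDY transitions of the SQ state machine.
+ */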
+static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
+{
+ struct mlx5_core_dev *mdev = sq->channel->mdev;
+ struct net_device *dev = sq->channel->netdev;
+ struct mlx5e_modify_sq_param msp = {0};
+ int err;
+
+ msp.curr_state = curr_state;
+ msp.next_state = MLX5_SQC_STATE_RST;
+
+ err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
+ return err;
+ }
+
+ memset(&msp, 0, sizeof(msp));
+ msp.curr_state = MLX5_SQC_STATE_RST;
+ msp.next_state = MLX5_SQC_STATE_RDY;
+
+ err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
+ return err;
+ }
+
+ return 0;
+}
+
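+/* Recovery worker for a SQ that hit an error CQE: verify the SQ is in
+ * error state, stop and drain the TXQ, rate-limit repeated attempts,
+ * then reset the SQ back to ready and reactivate the queue.
+ */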
+static void mlx5e_sq_recover(struct work_struct *work)
+{
+ struct mlx5e_txqsq_recover *recover =
+ container_of(work, struct mlx5e_txqsq_recover,
+ recover_work);
+ struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq,
+ recover);
+ struct mlx5_core_dev *mdev = sq->channel->mdev;
+ struct net_device *dev = sq->channel->netdev;
+ u8 state;
+ int err;
+
+ err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
+ if (err) {
+ netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
+ sq->sqn, err);
+ return;
+ }
+
+ if (state != MLX5_SQC_STATE_ERR) {
+ netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
+ return;
+ }
+
+ netif_tx_disable_queue(sq->txq);
+
+ if (mlx5e_wait_for_sq_flush(sq))
+ return;
+
+ /* If the interval between two consecutive recoveries per SQ is too
+ * short, don't recover, to avoid an infinite loop of ERR_CQE -> recover.
+ * If we reached this state, there is probably a bug that needs to be
+ * fixed; let's keep the queue closed and let the TX timeout clean up.
+ */
+ if (jiffies_to_msecs(jiffies - recover->last_recover) <
+ MLX5E_SQ_RECOVER_MIN_INTERVAL) {
+ netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n",
+ sq->sqn);
+ return;
+ }
+
+ /* At this point, no new packets will arrive from the stack as TXQ is
+ * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI has cleared all
+ * pending WQEs, so the SQ can safely be reset.
+ */
+ if (mlx5e_sq_to_ready(sq, state))
+ return;
+
+ mlx5e_reset_txqsq_cc_pc(sq);
+ sq->stats.recover++;
+ recover->last_recover = jiffies;
+ mlx5e_activate_txqsq(sq);
+}
+
static int mlx5e_open_icosq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_modify_sq_param msp = {0};
+ struct mlx5_rate_limit rl = {0};
u16 rl_index = 0;
int err;
/* nothing to do */
return 0;
- if (sq->rate_limit)
+ if (sq->rate_limit) {
+ rl.rate = sq->rate_limit;
/* remove current rl index to free space to next ones */
- mlx5_rl_remove_rate(mdev, sq->rate_limit);
+ mlx5_rl_remove_rate(mdev, &rl);
+ }
sq->rate_limit = 0;
if (rate) {
- err = mlx5_rl_add_rate(mdev, rate, &rl_index);
+ rl.rate = rate;
+ err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
if (err) {
netdev_err(dev, "Failed configuring rate %u: %d\n",
rate, err);
rate, err);
/* remove the rate from the table */
if (rate)
- mlx5_rl_remove_rate(mdev, rate);
+ mlx5_rl_remove_rate(mdev, &rl);
return err;
}
struct mlx5e_params *params,
struct mlx5e_rq_param *param)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
- MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ mlx5e_mpwqe_get_log_num_strides(mdev, params) -
+ MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ mlx5e_mpwqe_get_log_stride_size(mdev, params) -
+ MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+ MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
}
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
- MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
- MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
+ MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
- param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
param->wq.linear = 1;
}
-static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_rq_param *param)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
+
+ param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_cq_param *param)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
void *cqc = param->cqc;
u8 log_cq_size;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
+ log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
+ mlx5e_mpwqe_get_log_num_strides(mdev, params);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- log_cq_size = params->log_rq_size;
+ log_cq_size = params->log_rq_mtu_frames;
}
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
}
-static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
+static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params, u16 mtu)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
+ u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
int err;
err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
return 0;
}
-static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params, u16 *mtu)
{
- struct mlx5_core_dev *mdev = priv->mdev;
u16 hw_mtu = 0;
int err;
if (err || !hw_mtu) /* fallback to port oper mtu */
mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
- *mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
+ *mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}
static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
+ struct mlx5e_params *params = &priv->channels.params;
struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
u16 mtu;
int err;
- err = mlx5e_set_mtu(priv, netdev->mtu);
+ err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
if (err)
return err;
- mlx5e_query_mtu(priv, &mtu);
- if (mtu != netdev->mtu)
+ mlx5e_query_mtu(mdev, params, &mtu);
+ if (mtu != params->sw_mtu)
netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
- __func__, mtu, netdev->mtu);
+ __func__, mtu, params->sw_mtu);
- netdev->mtu = mtu;
+ params->sw_mtu = mtu;
return 0;
}
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
+ if (mlx5e_vxlan_allowed(priv->mdev))
+ udp_tunnel_get_rx_info(netdev);
+
return err;
}
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
+ param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+ param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
+
return mlx5e_alloc_cq_common(mdev, param, cq);
}
-static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_cq_param cq_param = {};
struct mlx5e_rq_param rq_param = {};
struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- mlx5e_build_drop_rq_param(&rq_param);
+ mlx5e_build_drop_rq_param(priv, &rq_param);
err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
if (err)
if (err)
goto err_free_rq;
+ err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted, err %d\n", err);
+
return 0;
err_free_rq:
}
#endif
-int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
+static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
{
switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
static int set_feature_lro(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
+ struct mlx5e_params *old_params;
int err = 0;
bool reset;
mutex_lock(&priv->state_lock);
- reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
- reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
+ old_params = &priv->channels.params;
+ reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
- new_channels.params = priv->channels.params;
+ new_channels.params = *old_params;
new_channels.params.lro_en = enable;
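+ /* On striding RQ, a full channel restart is needed only when toggling
+ * LRO flips the linear-SKB decision; otherwise updating the TIRs is
+ * enough.
+ */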
+ if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
+ reset = false;
+ }
+
if (!reset) {
- priv->channels.params = new_channels.params;
+ *old_params = new_channels.params;
err = mlx5e_modify_tirs_lro(priv);
goto out;
}
netdev_features_t features)
{
netdev_features_t oper_features = netdev->features;
- int err;
+ int err = 0;
- err = mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_LRO, set_feature_lro);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_HW_VLAN_CTAG_FILTER,
+#define MLX5E_HANDLE_FEATURE(feature, handler) \
+ mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
+
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_HW_TC, set_feature_tc_num_filters);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_RXALL, set_feature_rx_all);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_RXFCS, set_feature_rx_fcs);
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
- err |= mlx5e_handle_feature(netdev, &oper_features, features,
- NETIF_F_NTUPLE, set_feature_arfs);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif
if (err) {
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels new_channels = {};
- int curr_mtu;
+ struct mlx5e_params *params;
int err = 0;
bool reset;
mutex_lock(&priv->state_lock);
- reset = !priv->channels.params.lro_en &&
- (priv->channels.params.rq_wq_type !=
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+ params = &priv->channels.params;
+ reset = !params->lro_en;
reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
- curr_mtu = netdev->mtu;
- netdev->mtu = new_mtu;
+ new_channels.params = *params;
+ new_channels.params.sw_mtu = new_mtu;
+
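+ /* On striding RQ, restart the channels only if the new MTU changes the
+ * number of packets per MPWQE; otherwise updating the HW MTU suffices.
+ */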
+ if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+ u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
+ u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
+
+ reset = reset && (ppw_old != ppw_new);
+ }
if (!reset) {
+ params->sw_mtu = new_mtu;
mlx5e_set_dev_port_mtu(priv);
+ netdev->mtu = params->sw_mtu;
goto out;
}
- new_channels.params = priv->channels.params;
err = mlx5e_open_channels(priv, &new_channels);
- if (err) {
- netdev->mtu = curr_mtu;
+ if (err)
goto out;
- }
mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
+ netdev->mtu = new_channels.params.sw_mtu;
out:
mutex_unlock(&priv->state_lock);
static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
struct mlx5e_txqsq *sq)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5_core_dev *mdev = priv->mdev;
- int irqn_not_used, eqn;
- struct mlx5_eq *eq;
+ struct mlx5_eq *eq = sq->cq.mcq.eq;
u32 eqe_count;
- if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used))
- return false;
-
- eq = mlx5_eqn2eq(mdev, eqn);
- if (IS_ERR(eq))
- return false;
-
netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
- eqn, eq->cons_index, eq->irqn);
+ eq->eqn, eq->cons_index, eq->irqn);
eqe_count = mlx5_eq_poll_irq_disabled(eq);
if (!eqe_count)
return true;
}
-static void mlx5e_tx_timeout(struct net_device *dev)
+static void mlx5e_tx_timeout_work(struct work_struct *work)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ tx_timeout_work);
+ struct net_device *dev = priv->netdev;
bool reopen_channels = false;
- int i;
+ int i, err;
- netdev_err(dev, "TX timeout detected\n");
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
if (!netif_xmit_stopped(dev_queue))
continue;
- netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+
+ netdev_err(dev,
+ "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
jiffies_to_usecs(jiffies - dev_queue->trans_start));
}
}
- if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state))
- schedule_work(&priv->tx_timeout_work);
+ if (!reopen_channels)
+ goto unlock;
+
+ mlx5e_close_locked(dev);
+ err = mlx5e_open_locked(dev);
+ if (err)
+ netdev_err(priv->netdev,
+ "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ err);
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
+}
+
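+/* ndo_tx_timeout runs in atomic context (netdev watchdog timer); defer
+ * the recovery, which takes locks and may sleep, to a work item.
+ */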
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ netdev_err(dev, "TX timeout detected\n");
+ queue_work(priv->wq, &priv->tx_timeout_work);
}
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
bpf_prog_put(old_prog);
if (reset) /* change RQ type according to priv->xdp_prog */
- mlx5e_set_rq_params(priv->mdev, &priv->channels.params);
+ mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
if (was_opened && reset)
mlx5e_open_locked(netdev);
return 0;
}
-u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
-{
- int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
-
- return bf_buf_size -
- sizeof(struct mlx5e_tx_wqe) +
- 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
-}
-
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels)
{
indirection_rqt[i] = i % num_channels;
}
-static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
+static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
- return (link_speed && pci_bw &&
- (pci_bw < 40000) && (pci_bw < link_speed));
-}
+ u32 link_speed = 0;
+ u32 pci_bw = 0;
-static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
-{
- return !(link_speed && pci_bw &&
- (pci_bw <= 16000) && (pci_bw < link_speed));
+ mlx5e_get_max_linkspeed(mdev, &link_speed);
+ pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
+ mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
+ link_speed, pci_bw);
+
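+/* Treat the PCI link as a bottleneck when the port speed exceeds the
+ * available PCI bandwidth by this ratio; the result steers defaults such
+ * as CQE compression, striding RQ and HW LRO.
+ */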
+#define MLX5E_SLOW_PCI_RATIO (2)
+
+ return link_speed && pci_bw &&
+ link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
-u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
int i;
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- u16 max_channels)
+ u16 max_channels, u16 mtu)
{
- u8 cq_period_mode = 0;
- u32 link_speed = 0;
- u32 pci_bw = 0;
+ u8 rx_cq_period_mode;
+ params->sw_mtu = mtu;
+ params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->num_channels = max_channels;
params->num_tc = 1;
- mlx5e_get_max_linkspeed(mdev, &link_speed);
- pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
- mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
- link_speed, pci_bw);
-
/* SQ */
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
params->rx_cqe_compress_def = false;
if (MLX5_CAP_GEN(mdev, cqe_compression) &&
MLX5_CAP_GEN(mdev, vport_group_manager))
- params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
+ params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
/* RQ */
- mlx5e_set_rq_params(mdev, params);
+ if (mlx5e_striding_rq_possible(mdev, params))
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ,
+ !slow_pci_heuristic(mdev));
+ mlx5e_set_rq_type(mdev, params);
+ mlx5e_init_rq_type_params(mdev, params);
/* HW LRO */
/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
- params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+ params->lro_en = !slow_pci_heuristic(mdev);
params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
- cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
- mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
+ mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
+ mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
/* TX inline */
- params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */
priv->profile = profile;
priv->ppriv = ppriv;
priv->msglevel = MLX5E_MSG_LEVEL;
- priv->hard_mtu = MLX5E_ETH_HARD_MTU;
- mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
+ mlx5e_build_nic_params(mdev, &priv->channels.params,
+ profile->max_nch(mdev), netdev->mtu);
mutex_init(&priv->state_lock);
}
}
-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
+#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
static const struct switchdev_ops mlx5e_switchdev_ops = {
.switchdev_port_attr_get = mlx5e_attr_get,
};
netdev->vlan_features |= NETIF_F_RXCSUM;
netdev->vlan_features |= NETIF_F_RXHASH;
+ netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
+
if (!!MLX5_CAP_ETH(mdev, lro_cap))
netdev->vlan_features |= NETIF_F_LRO;
mlx5e_set_netdev_dev_addr(netdev);
-#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
+#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
if (MLX5_VPORT_MANAGER(mdev))
netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
mlx5e_ipsec_build_netdev(priv);
}
-static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
+static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
priv->q_counter = 0;
}
+
+ err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
+ if (err) {
+ mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
+ priv->drop_rq_q_counter = 0;
+ }
}
-static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
+static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
- if (!priv->q_counter)
- return;
+ if (priv->q_counter)
+ mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
- mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+ if (priv->drop_rq_q_counter)
+ mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}
static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
mlx5e_set_dev_port_mtu(priv);
mlx5_lag_add(mdev, netdev);
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
#endif
- /* Device already registered: sync netdev system state */
- if (mlx5e_vxlan_allowed(mdev)) {
- rtnl_lock();
- udp_tunnel_get_rx_info(netdev);
- rtnl_unlock();
- }
queue_work(priv->wq, &priv->set_rx_mode_work);
if (err)
goto out;
- err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
+ mlx5e_create_q_counters(priv);
+
+ err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
- goto err_cleanup_tx;
+ goto err_destroy_q_counters;
}
err = profile->init_rx(priv);
if (err)
goto err_close_drop_rq;
- mlx5e_create_q_counter(priv);
-
if (profile->enable)
profile->enable(priv);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
-err_cleanup_tx:
+err_destroy_q_counters:
+ mlx5e_destroy_q_counters(priv);
profile->cleanup_tx(priv);
out:
profile->disable(priv);
flush_workqueue(priv->wq);
- mlx5e_destroy_q_counter(priv);
profile->cleanup_rx(priv);
mlx5e_close_drop_rq(&priv->drop_rq);
+ mlx5e_destroy_q_counters(priv);
profile->cleanup_tx(priv);
cancel_delayed_work_sync(&priv->update_stats_work);
}