ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
priv->mcast_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
if (carrier_status)
netif_carrier_on(dev);
} else {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
}
return ret;
}
}
- bond_dev->mtu = new_mtu;
+ WRITE_ONCE(bond_dev->mtu, new_mtu);
return 0;
return -EINVAL;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
EXPORT_SYMBOL_GPL(can_change_mtu);
!can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
!can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
et131x_disable_txrx(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
et131x_adapter_memory_free(adapter);
struct ace_regs __iomem *regs = ap->regs;
writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (new_mtu > ACE_STD_MTU) {
if (!(ap->jumbo)) {
return -EBUSY;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
return 0;
if (!ret) {
netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
update_rx_ring_mtu(adapter, new_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
} else {
netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
new_mtu);
if (!netif_running(dev)) {
/* new_mtu will be used
- * when device starts netxt time
+ * when device starts next time
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
/* stop the chip */
writel(RUN, lp->mmio + CMD0);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock);
return ret;
pdata->rx_buf_size = ret;
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
xgbe_restart_dev(pdata);
frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
xgene_enet_close(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
pdata->mac_ops->set_framesize(pdata, frame_size);
xgene_enet_open(ndev);
if (err < 0)
goto err_exit;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
err_exit:
return err;
{
struct ag71xx *ag = netdev_priv(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
ag71xx_max_frame_len(ndev->mtu));
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
netdev_update_features(netdev);
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.max_frame_size = new_mtu;
atl1c_set_rxbufsize(adapter, netdev);
atl1c_down(adapter);
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.max_frame_size = new_mtu;
adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
atl1e_down(adapter);
adapter->rx_buffer_len = (max_frame + 7) & ~7;
adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev)) {
atl1_down(adapter);
atl1_up(adapter);
struct atl2_hw *hw = &adapter->hw;
/* set MTU */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
hw->max_frame_size = new_mtu;
ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ETH_HLEN +
VLAN_HLEN + ETH_FCS_LEN);
/* We'll just catch it later when the
* device is up'd.
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
spin_lock_irq(&bp->lock);
b44_halt(bp);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
spin_unlock_irq(&bp->lock);
priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
{
struct bnx2 *bp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
false);
}
* because the actual alloc size is
* only updated as part of load
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!bnx2x_mtu_allows_gro(new_mtu))
dev->features &= ~NETIF_F_GRO_HW;
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
bnxt_set_ring_params(bp);
if (netif_running(dev))
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (new_mtu > ETH_DATA_LEN) {
if (tg3_flag(tp, 5780_CLASS)) {
mutex_lock(&bnad->conf_mutex);
mtu = netdev->mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
frame = BNAD_FRAME_SIZE(mtu);
new_frame = BNAD_FRAME_SIZE(new_mtu);
if (netif_running(dev))
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
/* Bring interface down, change mtu and bring interface back up */
xgmac_stop(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return xgmac_open(dev);
}
return -EINVAL;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
lio->mtu = new_mtu;
WRITE_ONCE(sc->caller_is_done, true);
return -EIO;
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
struct octeon_mgmt *p = netdev_priv(netdev);
int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
/* HW lifts the limit if the frame is VLAN tagged
* (+4 bytes per each tag, up to two tags)
return -EINVAL;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (!netif_running(netdev))
return 0;
return -EOPNOTSUPP;
if ((ret = mac->ops->set_mtu(mac, new_mtu)))
return ret;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
return ret;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
init_port_mtus(adapter);
if (adapter->params.rev == 0 && offload_running(adapter))
t3_load_mtus(adapter, adapter->params.mtus,
ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
if (!ret)
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return ret;
}
ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
-1, -1, -1, -1, true);
if (!ret)
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return ret;
}
return err;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (running) {
err = enic_open(netdev);
gmac_disable_tx_rx(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
CONFIG0_MAXLEN_MASK);
{
if (netif_running(dev))
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
}
iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return 0;
}
if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
return -EINVAL;
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
return 0;
}
return err;
out:
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
return err;
}
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return 0;
}
if (dev->flags & IFF_UP)
stop_gfar(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (dev->flags & IFF_UP)
startup_gfar(dev);
rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu);
if (!rc)
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return rc;
}
}
/* finally, set new mtu to netdevice */
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
out:
if (if_running) {
netdev_err(netdev, "failed to change MTU in hardware %d\n",
ret);
else
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return ret;
}
if (err)
netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
else
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return err;
}
/* This is to prevent starting RX channel in emac_rx_enable() */
set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
- dev->ndev->mtu = new_mtu;
+ WRITE_ONCE(dev->ndev->mtu, new_mtu);
emac_full_tx_reset(dev);
}
}
if (!ret) {
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
dev->rx_skb_size = emac_rx_skb_size(new_mtu);
dev->rx_sync_size = emac_rx_sync_size(new_mtu);
}
adapter->rx_buff_pool[i].active = 1;
if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
vio_cmo_set_dev_desired(viodev,
ibmveth_get_desired_dma
(viodev));
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
e1000_up(adapter);
adapter->max_frame_size = max_frame;
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
pm_runtime_get_sync(netdev->dev.parent);
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev)) {
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
return -EBUSY;
}
- netdev->mtu = (unsigned int)new_mtu;
+ WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
err = ice_down_up(vsi);
if (err)
return err;
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igb_up(adapter);
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igbvf_up(adapter);
igc_down(adapter);
netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igc_up(adapter);
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
{
struct jme_adapter *jme = netdev_priv(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
netdev_update_features(netdev);
jme_restart_rx_engine(jme);
struct ltq_etop_priv *priv = netdev_priv(dev);
unsigned long flags;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
spin_lock_irqsave(&priv->lock, flags);
ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN);
int curr_desc;
int ret = 0;
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
priv->rx_buf_size = xrx200_buffer_size(new_mtu);
priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
buff = ch_rx->rx_buff[ch_rx->dma.desc];
ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
if (ret) {
- net_dev->mtu = old_mtu;
+ WRITE_ONCE(net_dev->mtu, old_mtu);
priv->rx_buf_size = xrx200_buffer_size(old_mtu);
priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
break;
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
mv643xx_eth_recalc_skb_size(mp);
tx_set_rate(mp, 1000000000, 16777216);
return -EINVAL;
}
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
if (!netif_running(dev)) {
if (pp->bm_priv)
}
out_set:
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
dev->wanted_features = dev->features;
netdev_update_features(dev);
true);
if (!err) {
oct->link_info.mtu = new_mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
return err;
err = octep_vf_mbox_set_mtu(oct, new_mtu);
if (!err) {
oct->link_info.mtu = new_mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
return err;
}
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (if_up)
err = otx2_open(netdev);
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (if_up)
err = otx2vf_open(netdev);
if (err)
return err;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
}
{
struct pxa168_eth_private *pep = netdev_priv(dev);
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
set_port_config_ext(pep);
if (!netif_running(dev))
int err;
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
skge_down(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = skge_up(dev);
if (err)
u32 imask;
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
return 0;
}
sky2_rx_stop(sky2);
sky2_rx_clean(sky2);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA;
}
mtk_set_mcr_max_rx(mac, length);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
/* Calculate Rx buf size */
- dev->mtu = min(dev->mtu, priv->max_mtu);
+ WRITE_ONCE(dev->mtu, min(dev->mtu, priv->max_mtu));
mlx4_en_calc_rx_buf(dev);
en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
!mlx4_en_check_xdp_mtu(dev, new_mtu))
return -EOPNOTSUPP;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
out:
- netdev->mtu = params->sw_mtu;
+ WRITE_ONCE(netdev->mtu, params->sw_mtu);
mutex_unlock(&priv->state_lock);
return err;
}
if (err)
goto out;
- netdev->mtu = new_params.sw_mtu;
+ WRITE_ONCE(netdev->mtu, new_params.sw_mtu);
out:
mutex_unlock(&priv->state_lock);
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
mutex_lock(&priv->state_lock);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
mutex_unlock(&priv->state_lock);
return 0;
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
if (err)
goto err_port_mtu_set;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
err_port_mtu_set:
}
hw_mtu = (hw_mtu + 3) & ~3;
hw_priv->mtu = hw_mtu;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
ret = lan743x_mac_set_mtu(adapter, new_mtu);
if (!ret)
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return ret;
}
lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)),
lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!lan966x->fdma)
return 0;
goto out;
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
err = mana_attach(ndev);
if (err) {
netdev_err(ndev, "mana_attach failed: %d\n", err);
- ndev->mtu = old_mtu;
+ WRITE_ONCE(ndev->mtu, old_mtu);
}
out:
/* if we change the mtu on an active device, we must
* reset the device so the firmware sees the change */
myri10ge_close(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
myri10ge_open(dev);
- } else
- dev->mtu = new_mtu;
-
+ } else {
+ WRITE_ONCE(dev->mtu, new_mtu);
+ }
return 0;
}
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* synchronized against open : rtnl_lock() held by caller */
if (netif_running(dev)) {
struct s2io_nic *sp = netdev_priv(dev);
int ret = 0;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (netif_running(dev)) {
s2io_stop_all_tx_queue(sp);
s2io_card_down(sp);
*dp = nn->dp;
nn->dp = new_dp;
- nn->dp.netdev->mtu = new_dp.mtu;
+ WRITE_ONCE(nn->dp.netdev->mtu, new_dp.mtu);
if (!netif_is_rxfh_configured(nn->dp.netdev))
nfp_net_rss_init_itbl(nn);
if (err)
return err;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
NIXGE_MAX_JUMBO_FRAME_SIZE)
return -EINVAL;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
int old_mtu;
old_mtu = dev->mtu;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* return early if the buffer sizes will not change */
if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
}
} else {
pch_gbe_reset(adapter);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.mac.max_frame_size = max_frame;
}
reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
/* if we're not running, nothing more to do */
if (!netif_running(netdev)) {
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
err = ionic_start_queues_reconfig(lif);
mutex_unlock(&lif->queue_lock);
rc = adapter->set_mtu(adapter, mtu);
if (!rc)
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return rc;
}
static void qede_update_mtu(struct qede_dev *edev,
struct qede_reload_args *args)
{
- edev->ndev->mtu = args->u.mtu;
+ WRITE_ONCE(edev->ndev->mtu, args->u.mtu);
}
/* Netdevice NDOs */
rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
if (!rc)
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return rc;
}
netif_dbg(adpt, hw, adpt->netdev,
"changing MTU from %d to %d\n", netdev->mtu,
new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
return emac_reinit_locked(adpt);
new_mtu > (priv->real_dev->mtu - headroom))
return -EINVAL;
- rmnet_dev->mtu = new_mtu;
+ WRITE_ONCE(rmnet_dev->mtu, new_mtu);
return 0;
}
/* if network interface not up, no need for complexity */
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
cp_set_rxbufsize(cp); /* set new rx buf size */
return 0;
}
/* network IS up, close it, reset MTU, and come up again. */
cp_close(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
cp_set_rxbufsize(cp);
return cp_open(dev);
}
{
struct rtl8169_private *tp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
rtl_jumbo_config(tp);
rtl_set_eee_txidle_timer(tp);
{
struct ravb_private *priv = netdev_priv(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
if (netif_running(ndev)) {
synchronize_irq(priv->emac_irq);
if (netif_running(ndev))
return -EBUSY;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
netdev_update_features(ndev);
return 0;
rocker_port_stop(dev);
netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
if (err)
*/
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev))
return 0;
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
efx_mac_reconfigure(efx, true);
mutex_unlock(&efx->mac_lock);
ef4_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
ef4_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_siena_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
efx_siena_mac_reconfigure(efx, true);
mutex_unlock(&efx->mac_lock);
stmmac_set_rx_mode(dev);
}
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
netdev_update_features(dev);
return 0;
{
struct cas *cp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev) || !netif_device_present(dev))
return 0;
orig_jumbo = (dev->mtu > ETH_DATA_LEN);
new_jumbo = (new_mtu > ETH_DATA_LEN);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev) ||
(orig_jumbo == new_jumbo))
{
struct gem *gp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* We'll just catch it later when the device is up'd or resumed */
if (!netif_running(dev) || !netif_device_present(dev))
return ret;
pdata->rx_buf_size = ret;
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
xlgmac_restart_dev(pdata);
{
ENTER;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
if (netif_running(ndev)) {
bdx_close(ndev);
bdx_open(ndev);
int ret = 0;
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
goto out_0;
}
tmp_vptr->rx = rx;
tmp_vptr->tx = tx;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
{
struct wx *wx = netdev_priv(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
wx_set_rx_buffer_len(wx);
return 0;
XAE_TRL_SIZE) > lp->rxmem)
return -EINVAL;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
return ret;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
netif_tx_stop_all_queues(netdev);
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (running) {
for (epidx = 0; epidx < hw->max_epid; epidx++) {
else if (new_mtu < dev->min_mtu)
new_mtu = dev->min_mtu;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
if (ret)
goto rollback_vf;
- ndev->mtu = mtu;
+ WRITE_ONCE(ndev->mtu, mtu);
ret = netvsc_attach(ndev, device_info);
if (!ret)
goto out;
/* Attempt rollback to original MTU */
- ndev->mtu = orig_mtu;
+ WRITE_ONCE(ndev->mtu, orig_mtu);
if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring mtu failed\n");
if (macsec->real_dev->mtu - extra < new_mtu)
return -ERANGE;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
if (vlan->lowerdev->mtu < new_mtu)
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
}
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
return -EINVAL;
if (!netif_running(ndev)) {
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
}
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
ntb_transport_link_up(dev->qp);
}
}
sl->mtu = mtu;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
sl->buffsize = len;
err = 0;
team->port_mtu_change_allowed = false;
mutex_unlock(&team->lock);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
u16 reg16 = 0;
u8 buf[5];
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
ax88178_set_mfb(dev);
struct usbnet *dev = netdev_priv(net);
u16 tmp16;
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
if (net->mtu > 1500) {
{
struct usbnet *dev = netdev_priv(net);
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
cdc_ncm_set_dgram_size(dev, new_mtu + cdc_ncm_eth_hlen(dev));
return 0;
ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
if (!ret)
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
usb_autopm_put_interface(dev->intf);
case RTL_VER_01:
case RTL_VER_02:
case RTL_VER_07:
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
default:
break;
mutex_lock(&tp->control);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (netif_running(dev)) {
if (tp->rtl_ops.change_mtu)
// no second zero-length packet read wanted after mtu-sized packets
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
if (dev->rx_urb_size == old_hard_mtu) {
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
/*
* Reset_work may be in the middle of resetting the device, wait for its
if (!vsockmon_is_valid_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
return -EINVAL;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
if (mtu > max)
return -EINVAL;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
}
if (mtu > max)
return -EINVAL;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
}
return -EINVAL;
dev->hard_header_len = LL_HEADER_LENGTH + 2;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
if (max_mtu < new_mtu)
return -ERANGE;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
if (new_mtu < ETH_MIN_MTU || new_mtu > batadv_hardif_min_mtu(dev))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
bat_priv->mtu_set_by_user = new_mtu;
return 0;
{
struct net_bridge *br = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* this flag will be cleared if the MTU was automatically adjusted */
br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
if (err)
goto out_port_failed;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
dsa_bridge_mtu_normalization(dp);
return -EINVAL;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
break; /* Handled in ndo_change_mtu() */
mtu_max = hsr_get_max_mtu(port->hsr);
master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
- master->dev->mtu = mtu_max;
+ WRITE_ONCE(master->dev->mtu, mtu_max);
break;
case NETDEV_UNREGISTER:
if (!is_hsr_master(dev)) {
dev->needed_headroom += len;
if (set_mtu)
- dev->mtu = max_t(int, dev->mtu - len, 68);
+ WRITE_ONCE(dev->mtu, max_t(int, dev->mtu - len, 68));
if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) ||
(test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
t->fwmark = fwmark;
mtu = ip_tunnel_bind_dev(dev);
if (set_mtu)
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
}
dst_cache_reset(&t->dst_cache);
netdev_state_change(dev);
new_mtu = max_mtu;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
return -EINVAL;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
EXPORT_SYMBOL(ip6_tnl_change_mtu);
dev->flags &= ~IFF_POINTOPOINT;
if (keep_mtu && dev->mtu) {
- dev->mtu = clamp(dev->mtu, dev->min_mtu, dev->max_mtu);
+ WRITE_ONCE(dev->mtu,
+ clamp(dev->mtu, dev->min_mtu, dev->max_mtu));
return;
}
} while ((q = NEXT_SLAVE(q)) != m->slaves);
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
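
Every hunk above follows the same pattern: the store to dev->mtu in ndo_change_mtu() runs under RTNL, but fast paths may read dev->mtu locklessly, so the write is annotated with WRITE_ONCE() to pair with READ_ONCE() on the reader side and avoid load/store tearing. Below is a minimal userspace sketch of that pairing, not kernel code: the struct and function names (net_device_stub, stub_change_mtu, stub_read_mtu) are hypothetical, and the two macros are simplified volatile-cast stand-ins for the kernel's real READ_ONCE/WRITE_ONCE.

#include <stdio.h>

/* Simplified stand-ins for the kernel macros: force a single,
 * non-torn access through a volatile-qualified pointer. */
#define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile typeof(x) *)&(x))

struct net_device_stub {	/* hypothetical stand-in for struct net_device */
	unsigned int mtu;
};

/* Writer side: in the kernel, ndo_change_mtu() runs under RTNL,
 * but that lock does not protect the lockless readers. */
static int stub_change_mtu(struct net_device_stub *dev, int new_mtu)
{
	WRITE_ONCE(dev->mtu, new_mtu);	/* annotated store */
	return 0;
}

/* Reader side: a lockless fast path samples the MTU exactly once. */
static unsigned int stub_read_mtu(struct net_device_stub *dev)
{
	return READ_ONCE(dev->mtu);	/* pairs with the WRITE_ONCE above */
}

int main(void)
{
	struct net_device_stub dev = { .mtu = 1500 };

	stub_change_mtu(&dev, 9000);
	printf("mtu=%u\n", stub_read_mtu(&dev));
	return 0;
}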