} else {
stmmac_set_desc_addr(priv, first, des);
tmp_pay_len = pay_len;
+ des += proto_hdr_len;
+ pay_len = 0;
}
stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
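On cores that take this else branch the first descriptor maps only the header, so des is advanced past proto_hdr_len and pay_len cleared before the payload is handed to stmmac_tso_allocator(), which carves it into TSO_MAX_BUFF_SIZE chunks. A hedged sketch of that chunking; next_tx_desc() is a hypothetical helper, not driver API:

	/* Sketch: map 'tmp_pay_len' payload bytes starting at 'des' across
	 * as many descriptors as needed, TSO_MAX_BUFF_SIZE at a time. */
	while (tmp_pay_len > 0) {
		int buff_size = min_t(int, tmp_pay_len, TSO_MAX_BUFF_SIZE);
		struct dma_desc *desc = next_tx_desc(tx_q);	/* hypothetical */

		stmmac_set_desc_addr(priv, desc, des);
		des += buff_size;
		tmp_pay_len -= buff_size;
	}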
/* Only the last descriptor gets to point to the skb. */
tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+ /* Manage tx mitigation */
+ tx_q->tx_count_frames += nfrags + 1;
+ if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
+ !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ stmmac_tx_timer_arm(priv, queue);
+ } else {
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
/* We've used all descriptors we need for this skb, however,
* advance cur_tx so that it references a fresh descriptor.
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
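The rationale for the block above: a completion interrupt is requested only every tx_coal_frames frames, except that frames carrying a hardware TX timestamp must interrupt immediately so the timestamp can be read back. Distilled into a hedged sketch, not the driver's code; tx_irq_needed() is a hypothetical helper:

	/* Sketch: when must this frame raise a completion IRQ? */
	static bool tx_irq_needed(struct stmmac_priv *priv,
				  struct stmmac_tx_queue *tx_q,
				  struct sk_buff *skb, int nfrags)
	{
		/* PTP TX timestamps must be fetched promptly, so never
		 * defer the interrupt for timestamped frames. */
		bool want_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
				   priv->hwts_tx_en;

		tx_q->tx_count_frames += nfrags + 1;

		return want_tstamp ||
		       tx_q->tx_count_frames >= priv->tx_coal_frames;
	}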
priv->xstats.tx_tso_frames++;
priv->xstats.tx_tso_nfrags += nfrags;
- /* Manage tx mitigation */
- tx_q->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
- !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- priv->hwts_tx_en)) {
- stmmac_tx_timer_arm(priv, queue);
- } else {
- tx_q->tx_count_frames = 0;
- stmmac_set_tx_ic(priv, desc);
- priv->xstats.tx_set_ic_bit++;
- }
-
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
/* Only the last descriptor gets to point to the skb. */
tx_q->tx_skbuff[entry] = skb;
+ /* According to the coalesce parameter the IC bit for the latest
+ * segment is reset and the timer re-started to clean the tx status.
+ * This approach takes care of the fragments: desc is the first
+ * element in case of no SG.
+ */
+ tx_q->tx_count_frames += nfrags + 1;
+ if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
+ !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ stmmac_tx_timer_arm(priv, queue);
+ } else {
+ if (likely(priv->extend_desc))
+ desc = &tx_q->dma_etx[entry].basic;
+ else
+ desc = &tx_q->dma_tx[entry];
+
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
/* We've used all descriptors we need for this skb, however,
* advance cur_tx so that it references a fresh descriptor.
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
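When extended descriptors are in use the ring is an array of struct dma_extended_desc whose leading words alias a basic descriptor, which is why the IC bit is set through the .basic member. Condensed, the selection in the hunk above amounts to (a sketch, assuming the ring fields shown):

	/* Sketch: flag completion on the right descriptor view. */
	struct dma_desc *desc = likely(priv->extend_desc) ?
				&tx_q->dma_etx[entry].basic :
				&tx_q->dma_tx[entry];

	stmmac_set_tx_ic(priv, desc);	/* request an IRQ for this frame */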
dev->stats.tx_bytes += skb->len;
- /* According to the coalesce parameter the IC bit for the latest
- * segment is reset and the timer re-started to clean the tx status.
- * This approach takes care about the fragments: desc is the first
- * element in case of no SG.
- */
- tx_q->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
- !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
- (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- priv->hwts_tx_en)) {
- stmmac_tx_timer_arm(priv, queue);
- } else {
- tx_q->tx_count_frames = 0;
- stmmac_set_tx_ic(priv, desc);
- priv->xstats.tx_set_ic_bit++;
- }
-
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
if (unlikely(status & dma_own))
break;
- count++;
-
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
next_entry = rx_q->cur_rx;
goto read_again;
if (unlikely(error)) {
dev_kfree_skb(skb);
+ count++;
continue;
}
skb = napi_alloc_skb(&ch->rx_napi, len);
if (!skb) {
priv->dev->stats.rx_dropped++;
+ count++;
continue;
}
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += len;
+ count++;
}
if (status & rx_not_ls) {
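Moving count++ into every exit path means each consumed frame, including ones dropped for errors or a failed skb allocation, is charged against the NAPI budget, so a burst of bad frames can no longer keep the loop running past its limit. Schematically, with hypothetical helpers rather than the driver's actual loop:

	/* Sketch of the budget invariant after the change: read_frame(),
	 * frame_bad() and deliver() are hypothetical stand-ins. */
	while (count < limit) {
		if (!read_frame(rx_q))	/* descriptor still owned by DMA */
			break;

		count++;		/* charge the budget on every path... */

		if (frame_bad(rx_q))
			continue;	/* ...including dropped frames */

		deliver(rx_q);
	}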
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
u32 crc, hash = 0;
- u16 vid;
+ __le16 pmatch = 0;
+ int count = 0;
+ u16 vid = 0;
for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
__le16 vid_le = cpu_to_le16(vid);
crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
hash |= (1 << crc);
+ count++;
}
- return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
+ if (!priv->dma_cap.vlhash) {
+ if (count > 2) /* VID = 0 always passes filter */
+ return -EOPNOTSUPP;
+
+ pmatch = cpu_to_le16(vid);
+ hash = 0;
+ }
+
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
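With this change stmmac_update_vlan_hash() also receives the last VID walked, so a core without VLAN hash filtering can fall back to a single perfect-match entry; since VID 0 always passes the filter, up to two active VIDs are tolerated before returning -EOPNOTSUPP. A hedged sketch of the contract the callee now honours; set_vlan_hash_table() and set_vlan_perfect_match() are hypothetical stand-ins, not dwmac register accessors:

	/* Sketch only: illustrates the hash-vs-perfect-match split. */
	static int update_vlan_hash(struct mac_device_info *hw, u32 hash,
				    __le16 pmatch, bool is_double)
	{
		if (hash)	/* HW hash filtering available and populated */
			return set_vlan_hash_table(hw, hash, is_double);

		/* Perfect filtering fallback: match one VID exactly. */
		return set_vlan_perfect_match(hw, le16_to_cpu(pmatch), is_double);
	}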
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
bool is_double = false;
int ret;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
struct stmmac_priv *priv = netdev_priv(ndev);
bool is_double = false;
- if (!priv->dma_cap.vlhash)
- return -EOPNOTSUPP;
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
if (!ret) {
dev_info(priv->device, "Using %d bits DMA width\n",
priv->dma_cap.addr64);
+
+ /*
+ * If more than 32 bits can be addressed, make sure to
+ * enable enhanced addressing mode.
+ */
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ priv->plat->dma_cfg->eame = true;
} else {
ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
if (ret) {
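Taken together, the probe-time addressing setup this hunk belongs to roughly follows the shape below: try the full DMA width the hardware reports, enable enhanced addressing mode (EAME) so descriptors can carry addresses above 32 bits, and fall back to a 32-bit mask otherwise. A condensed sketch, not verbatim driver code:

	/* Sketch of the addressing setup flow. */
	if (priv->dma_cap.addr64) {
		ret = dma_set_mask_and_coherent(device,
						DMA_BIT_MASK(priv->dma_cap.addr64));
		if (!ret) {
			/* >32-bit DMA works: let descriptors carry full addresses */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			/* fall back to 32-bit addressing */
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret)
				return ret;
			priv->dma_cap.addr64 = 32;
		}
	}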