ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
- netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
- total_bytes);
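+	/* XDP Tx rings are not backed by a stack netdev Tx queue, so the
+	 * BQL accounting and queue-wake handling below do not apply to them.
+	 */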
+ if (ice_ring_is_xdp(tx_ring))
+ return !!budget;
+
+ netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
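	/* Wake the queue only once at least two frames' worth of
	 * descriptors (TX_WAKE_THRESHOLD) have been freed above.
	 */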
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
*/
smp_mb();
- if (__netif_subqueue_stopped(tx_ring->netdev,
- tx_ring->q_index) &&
+ if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
!test_bit(ICE_VSI_DOWN, vsi->state)) {
- netif_wake_subqueue(tx_ring->netdev,
- tx_ring->q_index);
+ netif_tx_wake_queue(txring_txq(tx_ring));
++tx_ring->tx_stats.restart_q;
}
}
/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
+ netif_tx_stop_queue(txring_txq(tx_ring));
/* Memory barrier before checking head and tail */
smp_mb();
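
	/* Check again in case another CPU has just made room available */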
if (likely(ICE_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
- /* A reprieve! - use start_subqueue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_tx_start_queue(txring_txq(tx_ring));
++tx_ring->tx_stats.restart_q;
return 0;
}