igb: don't drop packets if rx flow control is enabled
author      Robert Beckett <bob.beckett@collabora.com>
            Tue, 22 Oct 2019 15:31:41 +0000 (16:31 +0100)
committer   Jeff Kirsher <jeffrey.t.kirsher@intel.com>
            Tue, 31 Dec 2019 19:17:25 +0000 (11:17 -0800)
If Rx flow control has been enabled (via autoneg or forced), packets
should not be dropped due to Rx descriptor ring exhaustion. Instead,
pause frames should be used to apply back pressure. This only applies
if VFs are not in use.

Move SRRCTL setup to its own function for easy reuse and only set drop
enable bit if Rx flow control is not enabled.
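
In outline, the drop-enable decision now depends on the negotiated flow
control mode as well as the queue/VF configuration. The following is a
minimal standalone sketch of that decision, not driver code: the helper
name want_drop_en is made up for the example, and the enum values are
assumed to mirror igb's e1000_fc_mode (rx_pause = 1, tx_pause = 2,
full = 3).

    #include <stdbool.h>
    #include <stdio.h>

    enum fc_mode { FC_NONE = 0, FC_RX_PAUSE = 1, FC_TX_PAUSE = 2, FC_FULL = 3 };

    /* Drop on Rx ring exhaustion only when VFs are in use, or when multiple
     * queues are in use and Rx pause frames cannot provide back pressure.
     */
    static bool want_drop_en(unsigned int num_vfs, unsigned int num_rx_queues,
                             enum fc_mode fc)
    {
            return num_vfs || (!(fc & FC_RX_PAUSE) && num_rx_queues > 1);
    }

    int main(void)
    {
            printf("no VFs, 4 queues, fc=full: %d\n", want_drop_en(0, 4, FC_FULL)); /* 0 */
            printf("no VFs, 4 queues, fc=none: %d\n", want_drop_en(0, 4, FC_NONE)); /* 1 */
            printf("2 VFs,  4 queues, fc=full: %d\n", want_drop_en(2, 4, FC_FULL)); /* 1 */
            return 0;
    }

With Rx pause active (rx_pause or full) and no VFs, the bit stays clear and
the MAC applies back pressure with pause frames instead of dropping.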

Since v1: always enable dropping of packets if VFs are in use.

Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c

diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ca54e26..49b5fa9 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -661,6 +661,7 @@ void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
 void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
 void igb_setup_tctl(struct igb_adapter *);
 void igb_setup_rctl(struct igb_adapter *);
+void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
 void igb_alloc_rx_buffers(struct igb_ring *, u16);
 void igb_update_stats(struct igb_adapter *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 4690d6c..43c4383 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -396,6 +396,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int retval = 0;
+       int i;
 
        /* 100basefx does not support setting link flow control */
        if (hw->dev_spec._82575.eth_flags.e100_base_fx)
@@ -428,6 +429,13 @@ static int igb_set_pauseparam(struct net_device *netdev,
 
                retval = ((hw->phy.media_type == e1000_media_type_copper) ?
                          igb_force_mac_fc(hw) : igb_setup_link(hw));
+
+               /* Make sure SRRCTL considers new fc settings for each ring */
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+                       struct igb_ring *ring = adapter->rx_ring[i];
+
+                       igb_setup_srrctl(adapter, ring);
+               }
        }
 
        clear_bit(__IGB_RESETTING, &adapter->state);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d11e64a..b46bff8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4467,6 +4467,37 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
        wr32(E1000_VMOLR(vfn), vmolr);
 }
 
+/**
+ *  igb_setup_srrctl - configure the split and replication receive control
+ *                     registers
+ *  @adapter: Board private structure
+ *  @ring: receive ring to be configured
+ **/
+void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       int reg_idx = ring->reg_idx;
+       u32 srrctl = 0;
+
+       srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+       if (ring_uses_large_buffer(ring))
+               srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+       else
+               srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+       srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+       if (hw->mac.type >= e1000_82580)
+               srrctl |= E1000_SRRCTL_TIMESTAMP;
+       /* Only set Drop Enable if VFs allocated, or we are supporting multiple
+        * queues and rx flow control is disabled
+        */
+       if (adapter->vfs_allocated_count ||
+           (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
+            adapter->num_rx_queues > 1))
+               srrctl |= E1000_SRRCTL_DROP_EN;
+
+       wr32(E1000_SRRCTL(reg_idx), srrctl);
+}
+
 /**
  *  igb_configure_rx_ring - Configure a receive ring after Reset
  *  @adapter: board private structure
@@ -4481,7 +4512,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
        union e1000_adv_rx_desc *rx_desc;
        u64 rdba = ring->dma;
        int reg_idx = ring->reg_idx;
-       u32 srrctl = 0, rxdctl = 0;
+       u32 rxdctl = 0;
 
        /* disable the queue */
        wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4499,19 +4530,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
        writel(0, ring->tail);
 
        /* set descriptor configuration */
-       srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-       if (ring_uses_large_buffer(ring))
-               srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-       else
-               srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-       srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-       if (hw->mac.type >= e1000_82580)
-               srrctl |= E1000_SRRCTL_TIMESTAMP;
-       /* Only set Drop Enable if we are supporting multiple queues */
-       if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
-               srrctl |= E1000_SRRCTL_DROP_EN;
-
-       wr32(E1000_SRRCTL(reg_idx), srrctl);
+       igb_setup_srrctl(adapter, ring);
 
        /* set filtering for VMDQ pools */
        igb_set_vmolr(adapter, reg_idx & 0x7, true);