@@ ... @@
 {
        struct mpc52xx_fec_priv *priv = netdev_priv(dev);
        struct bcom_fec_bd *bd;
+       unsigned long flags;
 
        if (bcom_queue_full(priv->tx_dmatsk)) {
                if (net_ratelimit())
                        dev_err(&dev->dev, "transmit queue overrun\n");
                return NETDEV_TX_BUSY;
        }
 
-       spin_lock_irq(&priv->lock);
+       spin_lock_irqsave(&priv->lock, flags);
        dev->trans_start = jiffies;
 
        bd = (struct bcom_fec_bd *)
@@ ... @@
                netif_stop_queue(dev);
        }
 
-       spin_unlock_irq(&priv->lock);
+       spin_unlock_irqrestore(&priv->lock, flags);
 
        return NETDEV_TX_OK;
 }
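This first hunk is the transmit path of the mpc52xx FEC driver (the mpc52xx_fec_priv use gives it away). The bug being fixed is the same in every driver touched below: the transmit function can be entered with interrupts already disabled, typically via netpoll/netconsole, and spin_unlock_irq() re-enables interrupts unconditionally, clobbering the caller's IRQ state. The irqsave variants capture the current state in a local flags word and restore exactly that state. A minimal sketch of the before and after shapes; demo_lock and demo_count are hypothetical names, not from the patch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);      /* hypothetical lock */
static unsigned int demo_count;         /* state guarded by demo_lock */

/* Broken if the caller already disabled interrupts:
 * spin_unlock_irq() re-enables them unconditionally.
 */
static void demo_update_unsafe(void)
{
        spin_lock_irq(&demo_lock);
        demo_count++;
        spin_unlock_irq(&demo_lock);
}

/* Safe either way: the caller's IRQ state is saved in 'flags'
 * and restored exactly as it was.
 */
static void demo_update_safe(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        demo_count++;
        spin_unlock_irqrestore(&demo_lock, flags);
}

Note that flags must be an unsigned long and is passed by name, not by address: spin_lock_irqsave() is a macro. The next hunks apply the same conversion to the ixp2000 ixpdev driver.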
 
@@ ... @@
        struct ixpdev_priv *ip = netdev_priv(dev);
        struct ixpdev_tx_desc *desc;
        int entry;
+       unsigned long flags;
 
        if (unlikely(skb->len > PAGE_SIZE)) {
                /* @@@ Count drops.  */
@@ ... @@
 
        dev->trans_start = jiffies;
 
-       local_irq_disable();
+       local_irq_save(flags);
        ip->tx_queue_entries++;
        if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
                netif_stop_queue(dev);
-       local_irq_enable();
+       local_irq_restore(flags);
 
        return 0;
 }
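ixpdev makes the equivalent change where no spinlock is involved: its queue-depth accounting only needs local interrupts off, so local_irq_disable()/local_irq_enable() becomes local_irq_save()/local_irq_restore(). The save/restore pair nests correctly inside a section that already has interrupts disabled. A sketch, with demo_entries as a hypothetical stand-in for the driver's counter:

#include <linux/irqflags.h>

static unsigned int demo_entries;       /* hypothetical queue depth counter */

static void demo_account(void)
{
        unsigned long flags;

        /* local_irq_save() remembers whether IRQs were already off;
         * local_irq_restore() puts that exact state back, so this is
         * correct even when called with interrupts disabled.
         */
        local_irq_save(flags);
        demo_entries++;
        local_irq_restore(flags);
}

The macb driver follows, with the extra wrinkle of an early-exit error path.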
 
@@ ... @@
        dma_addr_t mapping;
        unsigned int len, entry;
        u32 ctrl;
+       unsigned long flags;
 
 #ifdef DEBUG
        int i;
 #endif
 
        len = skb->len;
-       spin_lock_irq(&bp->lock);
+       spin_lock_irqsave(&bp->lock, flags);
 
        /* This is a hard error, log it. */
        if (TX_BUFFS_AVAIL(bp) < 1) {
                netif_stop_queue(dev);
-               spin_unlock_irq(&bp->lock);
+               spin_unlock_irqrestore(&bp->lock, flags);
                dev_err(&bp->pdev->dev,
                        "BUG! Tx Ring full when queue awake!\n");
                dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
@@ ... @@
        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);
 
-       spin_unlock_irq(&bp->lock);
+       spin_unlock_irqrestore(&bp->lock, flags);
 
        dev->trans_start = jiffies;
 
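This one appears to be macb_start_xmit from the Atmel MACB driver. The point to notice is that the function unlocks in two places, the ring-full error path and the normal exit, and both unlocks must restore the flags word captured by the single spin_lock_irqsave(). A sketch of that shape, again with hypothetical names:

#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_ring_space;     /* hypothetical free-descriptor count */

static int demo_xmit(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);

        if (demo_ring_space < 1) {
                /* Error path: restore the same 'flags' saved above
                 * before reporting the problem.
                 */
                spin_unlock_irqrestore(&demo_lock, flags);
                return -EBUSY;
        }

        demo_ring_space--;
        spin_unlock_irqrestore(&demo_lock, flags);

        return 0;
}

The easy mistake in this kind of conversion is forgetting one of the exit paths. Finally, mlx4_en uses the trylock form.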
 
@@ ... @@
 {
        struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+       unsigned long flags;
 
        /* If we don't have a pending timer, set one up to catch our recent
           post in case the interface becomes idle */
@@ ... @@
 
        /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
        if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-               if (spin_trylock_irq(&ring->comp_lock)) {
+               if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
                        mlx4_en_process_tx_cq(priv->dev, cq);
-                       spin_unlock_irq(&ring->comp_lock);
+                       spin_unlock_irqrestore(&ring->comp_lock, flags);
                }
 }
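mlx4_en polls its TX completion queue opportunistically, so the conversion here is spin_trylock_irq() to spin_trylock_irqsave(). The trylock macro bundles the state handling: it saves the IRQ state into flags and disables interrupts, and if the lock is contended it restores that state itself and returns 0, so flags only needs explicit restoring on the success path. Usage sketch with a hypothetical lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_comp_lock); /* hypothetical completion lock */

static void demo_poll(void)
{
        unsigned long flags;

        /* Nonzero return means the lock was taken with IRQs off and the
         * old state saved in 'flags'; zero means the macro already put
         * the IRQ state back, so there is nothing to undo here.
         */
        if (spin_trylock_irqsave(&demo_comp_lock, flags)) {
                /* ... process completions ... */
                spin_unlock_irqrestore(&demo_comp_lock, flags);
        }
}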