static void m_can_clean(struct net_device *net)
{
        struct m_can_classdev *cdev = netdev_priv(net);
+       unsigned long irqflags;

        if (cdev->tx_ops) {
                for (int i = 0; i != cdev->tx_fifo_size; ++i) {
                        if (!cdev->tx_ops[i].skb)
                                continue;

                        net->stats.tx_errors++;
                        cdev->tx_ops[i].skb = NULL;
                }
        }

        for (int i = 0; i != cdev->can.echo_skb_max; ++i)
                can_free_echo_skb(cdev->net, i, NULL);
+
+       spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+       cdev->tx_fifo_in_flight = 0;
+       spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
}
@@ ... @@ static void m_can_tx_update_stats(struct m_can_classdev *cdev,
        stats->tx_packets++;
}
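+/* Bookkeeping for the frames that currently occupy the TX FIFO:
+ * m_can_finish_tx() subtracts the number of frames whose transmission has
+ * just completed, and m_can_start_tx() below adds one for each frame handed
+ * to the TX path. Both update tx_fifo_in_flight under tx_handling_spinlock.
+ */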
+static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+       cdev->tx_fifo_in_flight -= transmitted;
+       spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+}
+
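+/* Account for one more frame in flight; called once per frame accepted by
+ * the transmit path.
+ */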
+static void m_can_start_tx(struct m_can_classdev *cdev)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+       ++cdev->tx_fifo_in_flight;
+       spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+}
+
static int m_can_echo_tx_event(struct net_device *dev)
{
        u32 txe_count = 0;
        int i = 0;
        int err = 0;
        unsigned int msg_mark;
+       int processed = 0;

        struct m_can_classdev *cdev = netdev_priv(dev);
@@ ... @@ static int m_can_echo_tx_event(struct net_device *dev)
                /* update stats */
                m_can_tx_update_stats(cdev, msg_mark, timestamp);
+               ++processed;
        }
        if (ack_fgi != -1)
                m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
                                                          ack_fgi));
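+       /* Drop the handled TX event FIFO elements from the in-flight count. */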
+       m_can_finish_tx(cdev, processed);
+
        return err;
}
@@ ... @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
                        if (cdev->is_peripheral)
                                timestamp = m_can_get_timestamp(cdev);
                        m_can_tx_update_stats(cdev, 0, timestamp);
                        netif_wake_queue(dev);
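+                       /* M_CAN v3.0.x has no TX event FIFO; IR_TC marks the
+                        * single outstanding frame as done.
+                        */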
+                       m_can_finish_tx(cdev, 1);
                }
        } else {
                if (ir & (IR_TEFN | IR_TEFW)) {
@@ ... @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
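+       /* Count this frame as in flight before handing it to the TX path. */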
+       m_can_start_tx(cdev);
+
        if (cdev->is_peripheral)
                return m_can_start_peripheral_xmit(cdev, skb);
        else