bpf, net: Use DEV_STATS_INC()
author: yunshui <jiangyunshui@kylinos.cn>
Thu, 23 May 2024 03:35:20 +0000 (11:35 +0800)
committer: Daniel Borkmann <daniel@iogearbox.net>
Tue, 28 May 2024 10:04:11 +0000 (12:04 +0200)
syzbot/KCSAN reported that races occur when multiple CPUs update
dev->stats.tx_errors concurrently. Adopt the SMP-safe DEV_STATS_INC() to
update the dev->stats fields.

Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: yunshui <jiangyunshui@kylinos.cn>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20240523033520.4029314-1-jiangyunshui@kylinos.cn
net/core/filter.c

index c6edfe9..7c46ecb 100644 (file)
@@ -2274,12 +2274,12 @@ static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
 
        err = bpf_out_neigh_v6(net, skb, dev, nh);
        if (unlikely(net_xmit_eval(err)))
-               dev->stats.tx_errors++;
+               DEV_STATS_INC(dev, tx_errors);
        else
                ret = NET_XMIT_SUCCESS;
        goto out_xmit;
 out_drop:
-       dev->stats.tx_errors++;
+       DEV_STATS_INC(dev, tx_errors);
        kfree_skb(skb);
 out_xmit:
        return ret;
@@ -2380,12 +2380,12 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
 
        err = bpf_out_neigh_v4(net, skb, dev, nh);
        if (unlikely(net_xmit_eval(err)))
-               dev->stats.tx_errors++;
+               DEV_STATS_INC(dev, tx_errors);
        else
                ret = NET_XMIT_SUCCESS;
        goto out_xmit;
 out_drop:
-       dev->stats.tx_errors++;
+       DEV_STATS_INC(dev, tx_errors);
        kfree_skb(skb);
 out_xmit:
        return ret;