wireguard: use DEV_STATS_INC()
author Eric Dumazet <edumazet@google.com>
Fri, 17 Nov 2023 14:17:33 +0000 (14:17 +0000)
committer David S. Miller <davem@davemloft.net>
Sun, 19 Nov 2023 19:48:25 +0000 (19:48 +0000)
wg_xmit() can be called concurrently; KCSAN reported [1] that some
device stats updates can be lost.

Use DEV_STATS_INC() for this unlikely case.
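
For context, a simplified sketch of what these helpers expand to in
include/linux/netdevice.h (exact definitions may vary by kernel version),
and why the plain increment can lose updates:

	/* The net_device_stats fields alias an atomic_long_t, so the
	 * helpers can update them atomically (simplified sketch):
	 */
	#define DEV_STATS_INC(DEV, FIELD)	\
		atomic_long_inc(&(DEV)->stats.__##FIELD)
	#define DEV_STATS_ADD(DEV, FIELD, VAL)	\
		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)

	/* By contrast, ++dev->stats.tx_dropped is a non-atomic
	 * read-modify-write: two CPUs running wg_xmit() concurrently can
	 * load the same value and store the same result, losing one
	 * increment.
	 */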

[1]
BUG: KCSAN: data-race in wg_xmit / wg_xmit

read-write to 0xffff888104239160 of 8 bytes by task 1375 on cpu 0:
wg_xmit+0x60f/0x680 drivers/net/wireguard/device.c:231
__netdev_start_xmit include/linux/netdevice.h:4918 [inline]
netdev_start_xmit include/linux/netdevice.h:4932 [inline]
xmit_one net/core/dev.c:3543 [inline]
dev_hard_start_xmit+0x11b/0x3f0 net/core/dev.c:3559
...

read-write to 0xffff888104239160 of 8 bytes by task 1378 on cpu 1:
wg_xmit+0x60f/0x680 drivers/net/wireguard/device.c:231
__netdev_start_xmit include/linux/netdevice.h:4918 [inline]
netdev_start_xmit include/linux/netdevice.h:4932 [inline]
xmit_one net/core/dev.c:3543 [inline]
dev_hard_start_xmit+0x11b/0x3f0 net/core/dev.c:3559
...

v2: also change wg_packet_consume_data_done() (Hangbin Liu)
    and wg_packet_purge_staged_packets()
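
In the purge path a whole staged queue is dropped at once, so the count
is folded into a single atomic add rather than one increment per packet,
as in the send.c hunk below (simplified):

	spin_lock_bh(&peer->staged_packet_queue.lock);
	/* Account for every purged packet with one atomic add. */
	DEV_STATS_ADD(peer->device->dev, tx_dropped,
		      peer->staged_packet_queue.qlen);
	__skb_queue_purge(&peer->staged_packet_queue);
	spin_unlock_bh(&peer->staged_packet_queue.lock);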

Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Hangbin Liu <liuhangbin@gmail.com>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Reviewed-by: Hangbin Liu <liuhangbin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/wireguard/device.c  |  4 ++--
 drivers/net/wireguard/receive.c | 12 ++++++------
 drivers/net/wireguard/send.c    |  3 ++-
 3 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 258dcc1039216f311a223fd348295d4b5e03a3ed..deb9636b0ecf8f47e832a0b07e9e049ba19bdf16 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -210,7 +210,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
                dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
-               ++dev->stats.tx_dropped;
+               DEV_STATS_INC(dev, tx_dropped);
        }
        skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
        spin_unlock_bh(&peer->staged_packet_queue.lock);
@@ -228,7 +228,7 @@ err_icmp:
        else if (skb->protocol == htons(ETH_P_IPV6))
                icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
 err:
-       ++dev->stats.tx_errors;
+       DEV_STATS_INC(dev, tx_errors);
        kfree_skb(skb);
        return ret;
 }
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 0b3f0c843550957ee1fe3bed7185a7d990246c2b..a176653c88616b1bc871fe52fcea778b5e189f69 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -416,20 +416,20 @@ dishonest_packet_peer:
        net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
                                dev->name, skb, peer->internal_id,
                                &peer->endpoint.addr);
-       ++dev->stats.rx_errors;
-       ++dev->stats.rx_frame_errors;
+       DEV_STATS_INC(dev, rx_errors);
+       DEV_STATS_INC(dev, rx_frame_errors);
        goto packet_processed;
 dishonest_packet_type:
        net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
                            dev->name, peer->internal_id, &peer->endpoint.addr);
-       ++dev->stats.rx_errors;
-       ++dev->stats.rx_frame_errors;
+       DEV_STATS_INC(dev, rx_errors);
+       DEV_STATS_INC(dev, rx_frame_errors);
        goto packet_processed;
 dishonest_packet_size:
        net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
                            dev->name, peer->internal_id, &peer->endpoint.addr);
-       ++dev->stats.rx_errors;
-       ++dev->stats.rx_length_errors;
+       DEV_STATS_INC(dev, rx_errors);
+       DEV_STATS_INC(dev, rx_length_errors);
        goto packet_processed;
 packet_processed:
        dev_kfree_skb(skb);
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index 95c853b59e1dae1df8b4e5cbf4e3541e35806b82..0d48e0f4a1ba3e1f11825136a65de0867b204496 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -333,7 +333,8 @@ err:
 void wg_packet_purge_staged_packets(struct wg_peer *peer)
 {
        spin_lock_bh(&peer->staged_packet_queue.lock);
-       peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
+       DEV_STATS_ADD(peer->device->dev, tx_dropped,
+                     peer->staged_packet_queue.qlen);
        __skb_queue_purge(&peer->staged_packet_queue);
        spin_unlock_bh(&peer->staged_packet_queue.lock);
 }