net: make softnet_data.dropped an atomic_t
author    Eric Dumazet <edumazet@google.com>
Fri, 29 Mar 2024 15:42:21 +0000 (15:42 +0000)
committer David S. Miller <davem@davemloft.net>
Mon, 1 Apr 2024 10:28:32 +0000 (11:28 +0100)
If enqueue_to_backlog() has to drop a packet under extreme cpu backlog
pressure, it should be able to do so without dirtying a cache line of
the target cpu and potentially slowing it down.

Move sd->dropped into a separate cache line, and make it atomic.

In non-pressure mode this field is not touched, so there is no need for
it to consume valuable space in a hot cache line.
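
To illustrate the idea, here is a minimal user-space sketch of the same
pattern: check the queue length without the lock, and account drops in an
atomic counter kept on its own cache line so producers that only come to
drop never dirty the consumer's hot data. The struct, the pthread lock,
the constant names and the 64-byte cache-line size are assumptions made
for the example, not the kernel code.

	/* Illustrative sketch only -- not the kernel implementation. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	#define MAX_BACKLOG 1000u
	#define CACHELINE   64

	struct backlog {
		/* Hot fields: written by the consumer and by successful producers. */
		pthread_mutex_t lock;
		atomic_uint     qlen;

		/*
		 * Rarely written: only touched when a packet must be dropped,
		 * kept on its own cache line so drops never dirty the hot line.
		 */
		_Alignas(CACHELINE) atomic_uint dropped;
	};

	bool backlog_enqueue(struct backlog *b /* , packet */)
	{
		/* Lockless early check: under overload, never touch the lock. */
		if (atomic_load_explicit(&b->qlen, memory_order_relaxed) > MAX_BACKLOG)
			goto drop;

		pthread_mutex_lock(&b->lock);
		if (atomic_load_explicit(&b->qlen, memory_order_relaxed) <= MAX_BACKLOG) {
			/* ... link the packet onto the queue here ... */
			atomic_fetch_add_explicit(&b->qlen, 1, memory_order_relaxed);
			pthread_mutex_unlock(&b->lock);
			return true;
		}
		pthread_mutex_unlock(&b->lock);
	drop:
		/* The only shared write on the drop path: one relaxed increment
		 * of a counter the consumer never reads in its fast path. */
		atomic_fetch_add_explicit(&b->dropped, 1, memory_order_relaxed);
		return false;
	}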

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/netdevice.h
net/core/dev.c
net/core/net-procfs.c

index 0cd9ee83f55419e3a65189b65c1999e4f7c7f708..be9f071a340d2ac3a0b02f85d6abe2b61fee0267 100644 (file)
@@ -3237,10 +3237,11 @@ struct softnet_data {
        unsigned int            input_queue_tail;
 #endif
        unsigned int            received_rps;
-       unsigned int            dropped;
        struct sk_buff_head     input_pkt_queue;
        struct napi_struct      backlog;
 
+       atomic_t                dropped ____cacheline_aligned_in_smp;
+
        /* Another possibly contended cache line */
        spinlock_t              defer_lock ____cacheline_aligned_in_smp;
        int                     defer_count;
index 4ad7836365e68f700b26dba2c50515a8c18329cf..02c98f115243202c409ee00c16e08fb0cf4d9ab9 100644 (file)
@@ -4800,17 +4800,22 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
        struct softnet_data *sd;
        unsigned long flags;
        unsigned int qlen;
+       int max_backlog;
 
        reason = SKB_DROP_REASON_DEV_READY;
        if (!netif_running(skb->dev))
                goto bad_dev;
 
+       reason = SKB_DROP_REASON_CPU_BACKLOG;
        sd = &per_cpu(softnet_data, cpu);
 
+       qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
+       max_backlog = READ_ONCE(net_hotdata.max_backlog);
+       if (unlikely(qlen > max_backlog))
+               goto cpu_backlog_drop;
        backlog_lock_irq_save(sd, &flags);
        qlen = skb_queue_len(&sd->input_pkt_queue);
-       if (qlen <= READ_ONCE(net_hotdata.max_backlog) &&
-           !skb_flow_limit(skb, qlen)) {
+       if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
                if (qlen) {
 enqueue:
                        __skb_queue_tail(&sd->input_pkt_queue, skb);
@@ -4826,11 +4831,11 @@ enqueue:
                        napi_schedule_rps(sd);
                goto enqueue;
        }
-       reason = SKB_DROP_REASON_CPU_BACKLOG;
 
-       sd->dropped++;
        backlog_unlock_irq_restore(sd, &flags);
 
+cpu_backlog_drop:
+       atomic_inc(&sd->dropped);
 bad_dev:
        dev_core_stats_rx_dropped_inc(skb->dev);
        kfree_skb_reason(skb, reason);
index a97eceb84e61ec347ad132ff0f22c8ce82f12e90..fa6d3969734a6ec154c3444d1b25ee93edfc5588 100644 (file)
@@ -144,7 +144,8 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
        seq_printf(seq,
                   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
                   "%08x %08x\n",
-                  sd->processed, sd->dropped, sd->time_squeeze, 0,
+                  sd->processed, atomic_read(&sd->dropped),
+                  sd->time_squeeze, 0,
                   0, 0, 0, 0, /* was fastroute */
                   0,   /* was cpu_collision */
                   sd->received_rps, flow_limit_count,
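
For reference, a minimal sketch of reading the per-cpu dropped counts
back from /proc/net/softnet_stat, where (as the seq_printf above shows)
the dropped count is the second hexadecimal column of each per-cpu
line. Illustrative only, with error handling kept to a minimum; the
line order follows the online cpus rather than absolute cpu ids.

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/net/softnet_stat", "r");
		unsigned int dropped;
		char rest[512];
		int line = 0;

		if (!f)
			return 1;
		/* Skip the first column (processed), keep the second (dropped). */
		while (fscanf(f, "%*x %x", &dropped) == 1) {
			printf("line %d: dropped=%u\n", line++, dropped);
			if (!fgets(rest, sizeof(rest), f))	/* skip remaining columns */
				break;
		}
		fclose(f);
		return 0;
	}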