tcp: annotate lockless accesses to sk->sk_err_soft
author     Eric Dumazet <edumazet@google.com>
           Wed, 15 Mar 2023 20:57:41 +0000 (20:57 +0000)
committer  David S. Miller <davem@davemloft.net>
           Fri, 17 Mar 2023 08:25:05 +0000 (08:25 +0000)
This field can be read and written without lock synchronization; annotate the lockless accesses with READ_ONCE()/WRITE_ONCE() so the compiler cannot tear or duplicate the loads and stores.
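
For context, below is a minimal userspace sketch of the pattern this patch
applies: WRITE_ONCE() on the store side paired with READ_ONCE() on the
lockless load side, so the compiler cannot tear, fuse or duplicate the
accesses. The simplified macro definitions, the fake_sock struct and the
helper names are assumptions made for illustration only, not kernel code;
the kernel's own READ_ONCE()/WRITE_ONCE() additionally carry type checks
and KCSAN instrumentation.

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros: a volatile cast forces a
 * single, untorn load or store per annotation.
 */
#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

/* Hypothetical stand-in for the socket field this patch touches. */
struct fake_sock {
	int sk_err_soft;
};

/* Writer side, as in tcp_v4_err()/tcp_v6_err(): record a soft error that
 * another context may read without holding the socket lock.
 */
static void record_soft_error(struct fake_sock *sk, int err)
{
	WRITE_ONCE(sk->sk_err_soft, err);
}

/* Reader side, as in tcp_write_err(): report the soft error if one was
 * recorded, else fall back to ETIMEDOUT. The GNU "? :" form mirrors the
 * kernel code and evaluates the READ_ONCE() operand only once.
 */
static int effective_error(struct fake_sock *sk)
{
	return READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT;
}

int main(void)
{
	struct fake_sock sk = { 0 };

	printf("no soft error recorded -> %d\n", effective_error(&sk));
	record_soft_error(&sk, EMSGSIZE);	/* e.g. from tcp_v4_mtu_reduced() */
	printf("soft error recorded    -> %d\n", effective_error(&sk));
	return 0;
}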

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_timer.c
net/ipv6/tcp_ipv6.c

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cc072d2cfcd82c8b91b83ac4cb9466a278763c82..8b5b6ca6617d0f6e2b03cf7164a6e8929fc521e1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3874,7 +3874,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        /* We passed data and got it acked, remove any soft error
         * log. Something worked...
         */
-       sk->sk_err_soft = 0;
+       WRITE_ONCE(sk->sk_err_soft, 0);
        icsk->icsk_probes_out = 0;
        tp->rcv_tstamp = tcp_jiffies32;
        if (!prior_packets)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ea370afa70ed979266dbeea474b034e833b15db4..4f6894469b620a75963b9329fc9944d835671515 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -361,7 +361,7 @@ void tcp_v4_mtu_reduced(struct sock *sk)
         * for the case, if this connection will not able to recover.
         */
        if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
-               sk->sk_err_soft = EMSGSIZE;
+               WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);
 
        mtu = dst_mtu(dst);
 
@@ -602,7 +602,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
 
                        tcp_done(sk);
                } else {
-                       sk->sk_err_soft = err;
+                       WRITE_ONCE(sk->sk_err_soft, err);
                }
                goto out;
        }
@@ -628,7 +628,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
                sk->sk_err = err;
                sk_error_report(sk);
        } else  { /* Only an error on timeout */
-               sk->sk_err_soft = err;
+               WRITE_ONCE(sk->sk_err_soft, err);
        }
 
 out:
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index cb79127f45c341e13bb66f8dc61c4fa84dbd340d..8823e2182713a26fa42ce44a21b9ec7a4d7e1c73 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -67,7 +67,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
 
 static void tcp_write_err(struct sock *sk)
 {
-       sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
+       sk->sk_err = READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT;
        sk_error_report(sk);
 
        tcp_write_queue_purge(sk);
@@ -110,7 +110,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
                shift++;
 
        /* If some dubious ICMP arrived, penalize even more. */
-       if (sk->sk_err_soft)
+       if (READ_ONCE(sk->sk_err_soft))
                shift++;
 
        if (tcp_check_oom(sk, shift)) {
@@ -146,7 +146,7 @@ static int tcp_orphan_retries(struct sock *sk, bool alive)
        int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */
 
        /* We know from an ICMP that something is wrong. */
-       if (sk->sk_err_soft && !alive)
+       if (READ_ONCE(sk->sk_err_soft) && !alive)
                retries = 0;
 
        /* However, if socket sent something recently, select some safe
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1bf93b61aa06ffe9536fb5a041e7724fa9eef5b1..dc963eebc668f7d24981de21650608a27e431d41 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -497,8 +497,9 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                        sk_error_report(sk);            /* Wake people up to see the error (see connect in sock.c) */
 
                        tcp_done(sk);
-               } else
-                       sk->sk_err_soft = err;
+               } else {
+                       WRITE_ONCE(sk->sk_err_soft, err);
+               }
                goto out;
        case TCP_LISTEN:
                break;
@@ -514,9 +515,9 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk_error_report(sk);
-       } else
-               sk->sk_err_soft = err;
-
+       } else {
+               WRITE_ONCE(sk->sk_err_soft, err);
+       }
 out:
        bh_unlock_sock(sk);
        sock_put(sk);