tcp: Fix data-races around sysctl_tcp_reordering.
Author:     Kuniyuki Iwashima <kuniyu@amazon.com>
AuthorDate: Fri, 15 Jul 2022 17:17:49 +0000 (10:17 -0700)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Fri, 29 Jul 2022 15:25:18 +0000 (17:25 +0200)
[ Upstream commit 46778cd16e6a5ad1b2e3a91f6c057c907379418e ]

sysctl_tcp_reordering can be changed concurrently while other threads
are reading it. Thus, we need to add READ_ONCE() to its readers.

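A plain C load of a shared variable may be torn, refetched, or fused
by the compiler, so two nearby uses can observe different values.
READ_ONCE() forces a single, whole load. Below is a minimal userspace
sketch of the pattern, using simplified stand-in macro definitions and
a hypothetical writer/reader pair; it is not the kernel implementation:

  #include <pthread.h>
  #include <stdio.h>

  /* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE();
   * the volatile access stops the compiler from tearing, refetching,
   * or fusing the load/store.
   */
  #define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
  #define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

  static int sysctl_tcp_reordering = 3;	/* tunable, changed at runtime */

  /* Writer side, e.g. a sysctl handler storing a new value. */
  static void *writer(void *arg)
  {
  	WRITE_ONCE(sysctl_tcp_reordering, 300);
  	return NULL;
  }

  /* Reader side: load the tunable once into a local and reuse that
   * snapshot, as tcp_enter_loss() does below with 'reordering'.
   */
  static void *reader(void *arg)
  {
  	int reordering = READ_ONCE(sysctl_tcp_reordering);

  	printf("using reordering=%d\n", reordering);
  	return NULL;
  }

  int main(void)
  {
  	pthread_t w, r;

  	pthread_create(&w, NULL, writer, NULL);
  	pthread_create(&r, NULL, reader, NULL);
  	pthread_join(w, NULL);
  	pthread_join(r, NULL);
  	return 0;
  }

In the kernel, KCSAN treats such marked accesses as intentional
concurrency and stops flagging them as data races.
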
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
 net/ipv4/tcp.c         |  2 +-
 net/ipv4/tcp_input.c   | 10 +++++++---
 net/ipv4/tcp_metrics.c |  3 ++-
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e22a61b2ba82c8a33d93f6d24c7dde5e273eb83a..480fac19a074f3218415ccd000ef8cdcd1d8babc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -447,7 +447,7 @@ void tcp_init_sock(struct sock *sk)
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = TCP_MSS_DEFAULT;
 
-       tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
+       tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
        tcp_assign_congestion_control(sk);
 
        tp->tsoffset = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 134e36f46e91441c37ba503bb52d7791ff6e4d0c..06802295e170e9558ccebbba9bf3e3afa3e319b0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2131,6 +2131,7 @@ void tcp_enter_loss(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
+       u8 reordering;
 
        tcp_timeout_mark_lost(sk);
 
@@ -2151,10 +2152,12 @@ void tcp_enter_loss(struct sock *sk)
        /* Timeout in disordered state after receiving substantial DUPACKs
         * suggests that the degree of reordering is over-estimated.
         */
+       reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
        if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
-           tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
+           tp->sacked_out >= reordering)
                tp->reordering = min_t(unsigned int, tp->reordering,
-                                      net->ipv4.sysctl_tcp_reordering);
+                                      reordering);
+
        tcp_set_ca_state(sk, TCP_CA_Loss);
        tp->high_seq = tp->snd_nxt;
        tcp_ecn_queue_cwr(tp);
@@ -3457,7 +3460,8 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
         * new SACK or ECE mark may first advance cwnd here and later reduce
         * cwnd in tcp_fastretrans_alert() based on more states.
         */
-       if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
+       if (tcp_sk(sk)->reordering >
+           READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
                return flag & FLAG_FORWARD_PROGRESS;
 
        return flag & FLAG_DATA_ACKED;
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 7029b0e98edb285102dcab4521f296511a70dc57..a501150deaa3b326f1cffdbdd7924d856dde6682 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -428,7 +428,8 @@ void tcp_update_metrics(struct sock *sk)
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
-                           tp->reordering != net->ipv4.sysctl_tcp_reordering)
+                           tp->reordering !=
+                           READ_ONCE(net->ipv4.sysctl_tcp_reordering))
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
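
Note also the shape of the tcp_enter_loss() hunk above: the tunable is
read once into the local 'reordering', and that snapshot feeds both the
sacked_out check and the min_t() clamp, so a concurrent sysctl write
cannot make the two tests disagree. Condensed from the hunk, with
context trimmed:

  	/* One marked load; both uses see the same snapshot. */
  	reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
  	if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
  	    tp->sacked_out >= reordering)
  		tp->reordering = min_t(unsigned int, tp->reordering,
  				       reordering);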