tcp: adjust rcv_ssthresh according to sk_reserved_mem
authorWei Wang <weiwan@google.com>
Wed, 29 Sep 2021 17:25:13 +0000 (10:25 -0700)
committerDavid S. Miller <davem@davemloft.net>
Thu, 30 Sep 2021 12:36:46 +0000 (13:36 +0100)
When the user sets the SO_RESERVE_MEM socket option, in order to utilize
the reserved memory when under memory pressure, we adjust rcv_ssthresh
according to the available reserved memory for the socket, instead of
always using 4 * advmss.

Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/tcp.h
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c

index 32cf6c01f40336d52705c05d372a1cf56af5d8ba..4c2898ac65698a097ccaedcc59542041f15f4f70 100644 (file)
@@ -1421,6 +1421,17 @@ static inline int tcp_full_space(const struct sock *sk)
        return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+       int unused_mem = sk_unused_reserved_mem(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+       if (unused_mem)
+               tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+                                        tcp_win_from_space(sk, unused_mem));
+}
+
 void tcp_cleanup_rbuf(struct sock *sk, int copied);
 
 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
index 06020395cc8d0a631813937d498a211f14777a69..246ab7b5e857eb9e802c4805075e89c98cf00636 100644 (file)
@@ -500,8 +500,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
 
        room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
 
+       if (room <= 0)
+               return;
+
        /* Check #1 */
-       if (room > 0 && !tcp_under_memory_pressure(sk)) {
+       if (!tcp_under_memory_pressure(sk)) {
                unsigned int truesize = truesize_adjust(adjust, skb);
                int incr;
 
@@ -518,6 +521,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
                        tp->rcv_ssthresh += min(room, incr);
                        inet_csk(sk)->icsk_ack.quick |= 1;
                }
+       } else {
+               /* Under pressure:
+                * Adjust rcv_ssthresh according to reserved mem
+                */
+               tcp_adjust_rcv_ssthresh(sk);
        }
 }
 
@@ -5345,7 +5353,7 @@ static int tcp_prune_queue(struct sock *sk)
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                tcp_clamp_window(sk);
        else if (tcp_under_memory_pressure(sk))
-               tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+               tcp_adjust_rcv_ssthresh(sk);
 
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
                return 0;
index fdc39b4fbbfa0b63a20c2d5ffc3ba923d8d02814..3a01e5593a171d8e8978c11c9880eb9314feeda9 100644 (file)
@@ -2967,8 +2967,7 @@ u32 __tcp_select_window(struct sock *sk)
                icsk->icsk_ack.quick = 0;
 
                if (tcp_under_memory_pressure(sk))
-                       tp->rcv_ssthresh = min(tp->rcv_ssthresh,
-                                              4U * tp->advmss);
+                       tcp_adjust_rcv_ssthresh(sk);
 
                /* free_space might become our new window, make sure we don't
                 * increase it due to wscale.