        This time period grows exponentially when more blackhole issues
        are detected right after Fastopen is re-enabled, and resets to
        the initial value when the blackhole issue goes away.
+       0 to disable the blackhole detection.
        By default, it is set to 1hr.
 
 tcp_syn_retries - INTEGER
 
 
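The exponential back-off behind tcp_fastopen_blackhole_timeout_sec is not shown in
this excerpt. Below is a minimal sketch of how tcp_fastopen_active_should_disable()
could implement it, assuming a per-netns event counter (tfo_active_disable_times),
a timestamp of the last event (tfo_active_disable_stamp) and the sysctl value kept
in seconds (sysctl_tcp_fastopen_blackhole_timeout); these field names are
assumptions for illustration, not taken from this patch:

/* Sketch: is active TFO currently paused because of recent blackhole events? */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
        struct net *net = sock_net(sk);
        int tfo_da_times = atomic_read(&net->ipv4.tfo_active_disable_times);
        unsigned long timeout;
        int multiplier;

        if (!tfo_da_times)
                return false;

        /* Double the pause for each consecutive blackhole event, with the
         * shift capped so it cannot overflow.
         */
        multiplier = 1 << min(tfo_da_times - 1, 6);
        timeout = multiplier * net->ipv4.sysctl_tcp_fastopen_blackhole_timeout * HZ;

        return time_before(jiffies, net->ipv4.tfo_active_disable_stamp + timeout);
}

With this kind of check, a sysctl value of 0 makes every computed pause zero, so
active TFO never actually stays disabled, which is one way the "0 to disable the
blackhole detection" line above could be realized.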
 /* From tcp_fastopen.c */
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-                           struct tcp_fastopen_cookie *cookie, int *syn_loss,
-                           unsigned long *last_syn_loss);
+                           struct tcp_fastopen_cookie *cookie);
 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost,
                            u16 try_exp);
 void tcp_fastopen_active_disable(struct sock *sk);
 bool tcp_fastopen_active_should_disable(struct sock *sk);
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
-void tcp_fastopen_active_timeout_reset(void);
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
 
 /* Latencies incurred by various limits for a sender. They are
  * chronograph-like stats that are mutually exclusive.
 
 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
                               struct tcp_fastopen_cookie *cookie)
 {
-       unsigned long last_syn_loss = 0;
        const struct dst_entry *dst;
-       int syn_loss = 0;
 
-       tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);
-
-       /* Recurring FO SYN losses: no cookie or data in SYN */
-       if (syn_loss > 1 &&
-           time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
-               cookie->len = -1;
-               return false;
-       }
+       tcp_fastopen_cache_get(sk, mss, cookie);
 
        /* Firewall blackhole issue check */
        if (tcp_fastopen_active_should_disable(sk)) {
  * following circumstances:
  *   1. client side TFO socket receives out of order FIN
  *   2. client side TFO socket receives out of order RST
+ *   3. client side TFO socket has timed out three times consecutively during
+ *      or after handshake
  * We disable active side TFO globally for 1hr at first. Then if it
  * happens again, we disable it for 2h, then 4h, 8h, ...
  * And we reset the timeout back to 1hr when we see a successful active
                dst_release(dst);
        }
 }
+
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
+{
+       u32 timeouts = inet_csk(sk)->icsk_retransmits;
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       /* Broken middle-boxes may black-hole Fast Open connection during or
+        * even after the handshake. Be extremely conservative and pause
+        * Fast Open globally after hitting the third consecutive timeout or
+        * exceeding the configured timeout limit.
+        */
+       if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
+           (timeouts == 2 || (timeouts < 2 && expired))) {
+               tcp_fastopen_active_disable(sk);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+       }
+}
 
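tcp_fastopen_active_detect_blackhole() relies on tcp_fastopen_active_disable(),
which is only declared in this excerpt. A minimal sketch of what that helper
plausibly does, reusing the assumed per-netns counter and timestamp from the
earlier sketch:

/* Sketch: record one more blackhole event and restart the pause window. */
void tcp_fastopen_active_disable(struct sock *sk)
{
        struct net *net = sock_net(sk);

        /* Each recorded event doubles the pause computed in
         * tcp_fastopen_active_should_disable(), i.e. the 1hr, 2h, 4h, ...
         * progression described in the comment above.
         */
        atomic_inc(&net->ipv4.tfo_active_disable_times);
        net->ipv4.tfo_active_disable_stamp = jiffies;
}

Resetting the counter back to zero (the "reset the timeout back to 1hr" case in
the comment above) would then happen once a later active TFO connection exchanges
data successfully, e.g. from tcp_fastopen_active_disable_ofo_check().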
 static DEFINE_SEQLOCK(fastopen_seqlock);
 
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-                           struct tcp_fastopen_cookie *cookie,
-                           int *syn_loss, unsigned long *last_syn_loss)
+                           struct tcp_fastopen_cookie *cookie)
 {
        struct tcp_metrics_block *tm;
 
                        *cookie = tfom->cookie;
                        if (cookie->len <= 0 && tfom->try_exp == 1)
                                cookie->exp = true;
-                       *syn_loss = tfom->syn_loss;
-                       *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits) {
                        dst_negative_advice(sk);
-                       if (tp->syn_fastopen || tp->syn_data)
-                               tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-                       if (tp->syn_data && icsk->icsk_retransmits == 1)
-                               NET_INC_STATS(sock_net(sk),
-                                             LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                } else if (!tp->syn_data && !tp->syn_fastopen) {
                        sk_rethink_txhash(sk);
                }
                expired = icsk->icsk_retransmits >= retry_until;
        } else {
                if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
-                       /* Some middle-boxes may black-hole Fast Open _after_
-                        * the handshake. Therefore we conservatively disable
-                        * Fast Open on this path on recurring timeouts after
-                        * successful Fast Open.
-                        */
-                       if (tp->syn_data_acked) {
-                               tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-                               if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
-                                       NET_INC_STATS(sock_net(sk),
-                                                     LINUX_MIB_TCPFASTOPENACTIVEFAIL);
-                       }
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
 
                expired = retransmits_timed_out(sk, retry_until,
                                                icsk->icsk_user_timeout);
        }
+       tcp_fastopen_active_detect_blackhole(sk, expired);
        if (expired) {
                /* Has it gone just too far? */
                tcp_write_err(sk);
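
For context, the "client side TFO socket" that the comments above refer to is one
performing an active Fast Open, typically by passing MSG_FASTOPEN to sendto() so
the request data can ride on the SYN. A minimal, illustrative user-space sketch
(error handling trimmed; the helper name and calling convention are made up for
illustration):

#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

/* Open a TCP connection with active Fast Open: the request is handed to the
 * kernel together with the connection attempt, so it can travel on the SYN
 * when a valid TFO cookie is cached for the destination.
 */
static int tfo_send(const struct sockaddr_in *dst, const void *req, size_t len)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;

        /* Falls back to a normal three-way handshake (with the data queued
         * until the handshake completes) when no cookie is cached or when
         * active TFO has been paused by the blackhole detection above.
         */
        if (sendto(fd, req, len, MSG_FASTOPEN,
                   (const struct sockaddr *)dst, sizeof(*dst)) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}

If a middlebox blackholes such SYN+data packets, the resulting consecutive
timeouts on sockets like this one are exactly what tcp_fastopen_active_detect_blackhole()
reacts to from tcp_write_timeout().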