 #define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
 #endif
 
+       u16 timeout_rehash;     /* Timeout-triggered rehash attempts */
+
        u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */
 
 /* Receiver side RTT estimation */
 
        LINUX_MIB_TCPRCVQDROP,                  /* TCPRcvQDrop */
        LINUX_MIB_TCPWQUEUETOOBIG,              /* TCPWqueueTooBig */
        LINUX_MIB_TCPFASTOPENPASSIVEALTKEY,     /* TCPFastOpenPassiveAltKey */
+       LINUX_MIB_TCPTIMEOUTREHASH,             /* TCPTimeoutRehash */
+       LINUX_MIB_TCPDUPLICATEDATAREHASH,       /* TCPDuplicateDataRehash */
        __LINUX_MIB_MAX
 };
 
 
        TCP_NLA_DSACK_DUPS,     /* DSACK blocks received */
        TCP_NLA_REORD_SEEN,     /* reordering events seen */
        TCP_NLA_SRTT,           /* smoothed RTT in usecs */
+       TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */
 };
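
For context (not part of the patch): userspace sees the TCP_NLA_* attributes
as a flat run of struct nlattr inside the SCM_TIMESTAMPING_OPT_STATS control
message. A minimal sketch of pulling the new u16 out of that blob, assuming a
uapi <linux/tcp.h> recent enough to define TCP_NLA_TIMEOUT_REHASH:

	#include <stdint.h>
	#include <string.h>
	#include <linux/netlink.h>	/* struct nlattr, NLA_HDRLEN, NLA_ALIGN */
	#include <linux/tcp.h>		/* TCP_NLA_TIMEOUT_REHASH */

	/* Walk a flat run of netlink attributes; return the timeout-rehash
	 * count, or -1 if the attribute is absent or malformed. */
	static int opt_stats_timeout_rehash(const void *buf, size_t len)
	{
		const struct nlattr *nla = buf;

		while (len >= NLA_HDRLEN && nla->nla_len >= NLA_HDRLEN &&
		       nla->nla_len <= len) {
			size_t step = NLA_ALIGN(nla->nla_len);

			if (nla->nla_type == TCP_NLA_TIMEOUT_REHASH &&
			    nla->nla_len >= NLA_HDRLEN + sizeof(uint16_t)) {
				uint16_t v;

				memcpy(&v, (const char *)nla + NLA_HDRLEN,
				       sizeof(v));
				return v;
			}
			if (step >= len)
				break;	/* last attribute in the run */
			len -= step;
			nla = (const struct nlattr *)((const char *)nla + step);
		}
		return -1;
	}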
 
 /* for TCP_MD5SIG socket option */
 
        SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP),
        SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG),
        SNMP_MIB_ITEM("TCPFastOpenPassiveAltKey", LINUX_MIB_TCPFASTOPENPASSIVEALTKEY),
+       SNMP_MIB_ITEM("TcpTimeoutRehash", LINUX_MIB_TCPTIMEOUTREHASH),
+       SNMP_MIB_ITEM("TcpDuplicateDataRehash", LINUX_MIB_TCPDUPLICATEDATAREHASH),
        SNMP_MIB_SENTINEL
 };
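
These strings are what /proc/net/netstat (and therefore nstat) reports. The
file pairs a "TcpExt:" header line of counter names with a second "TcpExt:"
line of values in the same order, so a hedged userspace sketch for scraping
one counter, e.g. read_netstat_counter("TcpTimeoutRehash"), looks like:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Scan /proc/net/netstat for a named counter; returns -1 if the
	 * counter is not present (e.g. on an older kernel). */
	static long read_netstat_counter(const char *name)
	{
		char hdr[4096], val[4096];
		long result = -1;
		FILE *f = fopen("/proc/net/netstat", "r");

		if (!f)
			return -1;
		/* Lines come in name-line/value-line pairs. */
		while (fgets(hdr, sizeof(hdr), f) && fgets(val, sizeof(val), f)) {
			char *hs, *vs;
			char *h = strtok_r(hdr, " \n", &hs);
			char *v = strtok_r(val, " \n", &vs);

			while (h && v) {
				if (!strcmp(h, name)) {
					result = strtol(v, NULL, 10);
					goto out;
				}
				h = strtok_r(NULL, " \n", &hs);
				v = strtok_r(NULL, " \n", &vs);
			}
		}
	out:
		fclose(f);
		return result;
	}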
 
 
                nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
                nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
                nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
+               nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */
                0;
 }
 
        nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
        nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
        nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
+       nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash);
 
        return stats;
 }
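
The blob built here is only attached when the sender opted in to
SOF_TIMESTAMPING_OPT_STATS. A sketch of the userspace side, with the flag
combination per Documentation/networking/timestamping.rst (OPT_STATS is only
valid together with OPT_TSONLY); each tx timestamp read from the error queue
with recvmsg(fd, &msg, MSG_ERRQUEUE) then carries an
SCM_TIMESTAMPING_OPT_STATS cmsg holding the nlattr run walked in the first
sketch above:

	#include <sys/socket.h>
	#include <linux/net_tstamp.h>

	/* Request software tx-ACK timestamps with the opt-stats blob
	 * attached to each one. */
	static int enable_opt_stats(int fd)
	{
		unsigned int flags = SOF_TIMESTAMPING_TX_ACK |
				     SOF_TIMESTAMPING_SOFTWARE |
				     SOF_TIMESTAMPING_OPT_ID |
				     SOF_TIMESTAMPING_OPT_TSONLY |
				     SOF_TIMESTAMPING_OPT_STATS;

		return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
				  &flags, sizeof(flags));
	}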
 
         * The receiver remembers and reflects via DSACKs. Leverage the
         * DSACK state and change the txhash to re-route speculatively.
         */
-       if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq)
+       if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq) {
                sk_rethink_txhash(sk);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
+       }
 }
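
For context, sk_rethink_txhash() does no routing work itself; paraphrasing
include/net/sock.h around this kernel version, it merely rerolls the socket's
random tx hash, which IPv6 flow-label/ECMP and tx-queue selection key off, so
subsequent packets may take a different network path:

	/* Paraphrase of include/net/sock.h, for context only: */
	static inline void sk_set_txhash(struct sock *sk)
	{
		sk->sk_txhash = net_tx_rndhash();
	}

	static inline void sk_rethink_txhash(struct sock *sk)
	{
		/* Only reroll if a txhash is in use for this socket. */
		if (sk->sk_txhash)
			sk_set_txhash(sk);
	}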
 
 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 
                        dst_negative_advice(sk);
                } else {
                        sk_rethink_txhash(sk);
+                       tp->timeout_rehash++;
+                       __NET_INC_STATS(sock_net(sk),
+                                       LINUX_MIB_TCPTIMEOUTREHASH);
                }
                retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                expired = icsk->icsk_retransmits >= retry_until;
                        dst_negative_advice(sk);
                } else {
                        sk_rethink_txhash(sk);
+                       tp->timeout_rehash++;
+                       __NET_INC_STATS(sock_net(sk),
+                                       LINUX_MIB_TCPTIMEOUTREHASH);
                }
 
                retry_until = net->ipv4.sysctl_tcp_retries2;
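
One detail worth noting: the timer paths above use the double-underscore stat
variant, while the DSACK path uses plain NET_INC_STATS(). Paraphrasing
include/net/ip.h, the two differ only in whether the per-cpu increment may
assume softirqs are already disabled, which holds in the retransmit-timer
context:

	/* Paraphrase of include/net/ip.h, for context only. The __ variant
	 * uses the non-BH-safe per-cpu increment and is valid only where
	 * softirqs are already disabled (e.g. timer callbacks). */
	#define NET_INC_STATS(net, field)				\
		SNMP_INC_STATS((net)->mib.net_statistics, field)
	#define __NET_INC_STATS(net, field)				\
		__SNMP_INC_STATS((net)->mib.net_statistics, field)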