                                         * udp packet drop out of
                                         * udp_memory_allocated.
                                         */
+       SKB_DROP_REASON_TCP_MD5NOTFOUND,        /* no MD5 hash and one
+                                                * expected, corresponding
+                                                * to LINUX_MIB_TCPMD5NOTFOUND
+                                                */
+       SKB_DROP_REASON_TCP_MD5UNEXPECTED,      /* MD5 hash and we're not
+                                                * expecting one, corresponding
+                                                * to LINUX_MIB_TCPMD5UNEXPECTED
+                                                */
+       SKB_DROP_REASON_TCP_MD5FAILURE,         /* MD5 hash and it's wrong,
+                                                * corresponding to
+                                                * LINUX_MIB_TCPMD5FAILURE
+                                                */
        SKB_DROP_REASON_MAX,
 };
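
For orientation, a hedged sketch of where these reason codes end up: once
drop_reason is set by the MD5 checks below, it is handed to
kfree_skb_reason(), which feeds the skb:kfree_skb tracepoint. The discard
labels here are assumed from mainline tcp_v4_rcv() and are not visible in
this excerpt.

    /* Sketch only, not part of this patch: assumed tail of tcp_v4_rcv(). */
    discard_it:
            /* drop_reason is now TCP_MD5NOTFOUND, TCP_MD5UNEXPECTED or
             * TCP_MD5FAILURE when an MD5 check rejected the segment,
             * instead of the catch-all NOT_SPECIFIED.
             */
            kfree_skb_reason(skb, drop_reason);
            return 0;

    discard_and_relse:
            sk_drops_add(sk, skb);
            if (refcounted)
                    sock_put(sk);
            goto discard_it;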
 
 
        EM(SKB_DROP_REASON_IP_NOPROTO, IP_NOPROTO)              \
        EM(SKB_DROP_REASON_SOCKET_RCVBUFF, SOCKET_RCVBUFF)      \
        EM(SKB_DROP_REASON_PROTO_MEM, PROTO_MEM)                \
+       EM(SKB_DROP_REASON_TCP_MD5NOTFOUND, TCP_MD5NOTFOUND)    \
+       EM(SKB_DROP_REASON_TCP_MD5UNEXPECTED,                   \
+          TCP_MD5UNEXPECTED)                                   \
+       EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE)      \
        EMe(SKB_DROP_REASON_MAX, MAX)
 
 #undef EM
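
For readers unfamiliar with the EM()/EMe() pattern above: the list is
expanded twice in include/trace/events/skb.h, once into TRACE_DEFINE_ENUM()
records so user space can resolve the values, and once into the string table
used by __print_symbolic(), so each new reason prints by name in the
kfree_skb event. An abridged sketch of that machinery (assumed from
mainline, not part of this diff):

    #undef EM
    #undef EMe
    #define EM(a, b)        TRACE_DEFINE_ENUM(a);
    #define EMe(a, b)       TRACE_DEFINE_ENUM(a);

    TRACE_SKB_DROP_REASON   /* one TRACE_DEFINE_ENUM() per entry */

    #undef EM
    #undef EMe
    #define EM(a, b)        { a, #b },
    #define EMe(a, b)       { a, #b }

    /* ...and later, in the kfree_skb event definition:
     * __print_symbolic(__entry->reason, TRACE_SKB_DROP_REASON)
     */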
 
 /* Called with rcu_read_lock() */
 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
                                    const struct sk_buff *skb,
-                                   int dif, int sdif)
+                                   int dif, int sdif,
+                                   enum skb_drop_reason *reason)
 {
 #ifdef CONFIG_TCP_MD5SIG
        /*
                return false;
 
        if (hash_expected && !hash_location) {
+               *reason = SKB_DROP_REASON_TCP_MD5NOTFOUND;
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return true;
        }
 
        if (!hash_expected && hash_location) {
+               *reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED;
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return true;
        }
                                      NULL, skb);
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+               *reason = SKB_DROP_REASON_TCP_MD5FAILURE;
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
                net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
                                     &iph->saddr, ntohs(th->source),
 int tcp_v4_rcv(struct sk_buff *skb)
 {
        struct net *net = dev_net(skb->dev);
+       enum skb_drop_reason drop_reason;
        int sdif = inet_sdif(skb);
        int dif = inet_iif(skb);
        const struct iphdr *iph;
        const struct tcphdr *th;
        bool refcounted;
        struct sock *sk;
-       int drop_reason;
        int ret;
 
        drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
                struct sock *nsk;
 
                sk = req->rsk_listener;
-               if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
+               if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif,
+                                                    &drop_reason))) {
                        sk_drops_add(sk, skb);
                        reqsk_put(req);
                        goto discard_it;
                goto discard_and_relse;
        }
 
-       if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
+       if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason))
                goto discard_and_relse;
 
        nf_reset_ct(skb);
 
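A design note on the signature change: both helpers keep their bool return,
so call sites only gain one argument and their conditions stay untouched;
the reason travels through an out-parameter. A hypothetical alternative
(illustration only, not what this patch does) would be to return the reason
itself and have callers compare against a "no drop" sentinel:

    /* Hypothetical variant, for illustration only: */
    static enum skb_drop_reason
    tcp_v4_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
                            int dif, int sdif);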
 
 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
                                    const struct sk_buff *skb,
-                                   int dif, int sdif)
+                                   int dif, int sdif,
+                                   enum skb_drop_reason *reason)
 {
 #ifdef CONFIG_TCP_MD5SIG
        const __u8 *hash_location = NULL;
                return false;
 
        if (hash_expected && !hash_location) {
+               *reason = SKB_DROP_REASON_TCP_MD5NOTFOUND;
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return true;
        }
 
        if (!hash_expected && hash_location) {
+               *reason = SKB_DROP_REASON_TCP_MD5UNEXPECTED;
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return true;
        }
                                      NULL, skb);
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+               *reason = SKB_DROP_REASON_TCP_MD5FAILURE;
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
                net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
                                     genhash ? "failed" : "mismatch",
                struct sock *nsk;
 
                sk = req->rsk_listener;
-               if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
+               if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif,
+                                           &drop_reason)) {
                        sk_drops_add(sk, skb);
                        reqsk_put(req);
                        goto discard_it;
                goto discard_and_relse;
        }
 
-       if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
+       if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif, &drop_reason))
                goto discard_and_relse;
 
        if (tcp_filter(sk, skb)) {
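
With both address families wired up, the new codes need no further plumbing
to be observable: enabling the skb:kfree_skb event (for example via
/sys/kernel/debug/tracing/events/skb/kfree_skb/enable) should attribute
MD5-induced drops by name instead of NOT_SPECIFIED. Assuming the mainline
event layout, a trace line would look roughly like:

    ... kfree_skb: skbaddr=... protocol=2048 location=... reason: TCP_MD5FAILURE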