 */
 static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
 {
+       int drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; /* refined before each goto drop */
        struct udp_sock *up = udp_sk(sk);
        int is_udplite = IS_UDPLITE(sk);
 
        /*
         *      Charge it to the socket, dropping if the queue is full.
         */
-       if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+       if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
+               drop_reason = SKB_DROP_REASON_XFRM_POLICY;
                goto drop;
+       }
        nf_reset_ct(skb);
 
        if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
            udp_lib_checksum_complete(skb))
                        goto csum_error;
 
-       if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
+       if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
+               drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
                goto drop;
+       }
 
        udp_csum_pull_header(skb);
 
        return __udp_queue_rcv_skb(sk, skb);
 
 csum_error:
+       drop_reason = SKB_DROP_REASON_UDP_CSUM; /* csum_error falls through into drop below */
        __UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
        __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        atomic_inc(&sk->sk_drops);
-       kfree_skb(skb);
+       kfree_skb_reason(skb, drop_reason); /* reason is surfaced via the kfree_skb tracepoint */
        return -1;
 }