        XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
+       bool is_thp = folio_test_pmd_mappable(folio);
        int extra_pins, ret;
        pgoff_t end;
        bool is_hzp;
                i_mmap_unlock_read(mapping);
 out:
        xas_destroy(&xas);
-       count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
+       if (is_thp)
+               count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
        return ret;
 }
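
Condensed, the tail of the split path now reads like the sketch below
(abridged from the hunk above; the surrounding locking, freezing and pin
checks are elided). Note that is_thp has to be sampled before the split is
attempted: once the folio has been split to a lower order it is no longer
PMD-mappable, so testing it at the counting site would read false after
every successful split.

	bool is_thp = folio_test_pmd_mappable(folio);
	int ret;

	/* ... pin checks and the actual split set ret ... */
out:
	xas_destroy(&xas);
	/* Bump the legacy THP counters only for PMD-sized folios so
	 * that splits of smaller large folios do not skew the stats. */
	if (is_thp)
		count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
	return ret;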
 
 
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
        if (list_empty(&folio->_deferred_list)) {
-               count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+               if (folio_test_pmd_mappable(folio))
+                       count_vm_event(THP_DEFERRED_SPLIT_PAGE);
                list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
                ds_queue->split_queue_len++;
 #ifdef CONFIG_MEMCG
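
Both counters gated here are exported through /proc/vmstat, so the effect
of the change is easy to observe from userspace. A minimal sketch that
dumps the relevant fields (names as found on CONFIG_TRANSPARENT_HUGEPAGE
kernels; the "thp_split_page" prefix deliberately also matches
thp_split_page_failed):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Matches thp_split_page, thp_split_page_failed and
		 * thp_deferred_split_page. */
		if (!strncmp(line, "thp_split_page", 14) ||
		    !strncmp(line, "thp_deferred_split_page", 23))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}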