*/
        stage2_put_pte(ptep, mmu, addr, level, mm_ops);
 
-       if (need_flush) {
-               kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
-
-               dcache_clean_inval_poc((unsigned long)pte_follow,
-                                   (unsigned long)pte_follow +
-                                           kvm_granule_size(level));
-       }
+       if (need_flush && mm_ops->dcache_clean_inval_poc)
+               mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
+                                              kvm_granule_size(level));
 
        if (childp)
                mm_ops->put_page(childp);
        struct kvm_pgtable *pgt = arg;
        struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
        kvm_pte_t pte = *ptep;
-       kvm_pte_t *pte_follow;
 
        if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
                return 0;
 
-       pte_follow = kvm_pte_follow(pte, mm_ops);
-       dcache_clean_inval_poc((unsigned long)pte_follow,
-                           (unsigned long)pte_follow +
-                                   kvm_granule_size(level));
+       if (mm_ops->dcache_clean_inval_poc)
+               mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
+                                              kvm_granule_size(level));
        return 0;
 }
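
Below is a minimal, standalone C sketch of the pattern these hunks introduce: instead of calling dcache_clean_inval_poc() directly, the walker goes through an optional mm_ops callback and skips the maintenance when no callback was supplied. The struct layout, helper names, and granule arithmetic here are simplified stand-ins for illustration, not the kernel's actual definitions.

/*
 * Sketch only: cache maintenance routed through an optional mm_ops
 * callback, mirroring the post-patch shape of the hunks above.
 * Names and types are illustrative stand-ins, not kernel definitions.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t kvm_pte_t;

struct mm_ops {
	/* Optional: may be NULL when the environment provides no helper. */
	void (*dcache_clean_inval_poc)(void *addr, size_t size);
};

/* Stand-in for kvm_pte_follow(): turn a PTE into a usable pointer. */
static void *pte_follow(kvm_pte_t pte)
{
	return (void *)(uintptr_t)(pte & ~0xfffULL);
}

/* Stand-in for kvm_granule_size(): bytes mapped at this level (4K granule). */
static size_t granule_size(uint32_t level)
{
	return (size_t)1 << (12 + 9 * (3 - level));
}

/* Walker step shaped like the rewritten unmap path. */
static void unmap_one(kvm_pte_t pte, uint32_t level, int need_flush,
		      const struct mm_ops *ops)
{
	/* Clean only when asked to *and* a callback was registered. */
	if (need_flush && ops->dcache_clean_inval_poc)
		ops->dcache_clean_inval_poc(pte_follow(pte),
					    granule_size(level));
}

static void demo_clean(void *addr, size_t size)
{
	printf("clean+invalidate to PoC: %p, %zu bytes\n", addr, size);
}

int main(void)
{
	struct mm_ops host_ops = { .dcache_clean_inval_poc = demo_clean };
	struct mm_ops bare_ops = { 0 };	/* no callback: maintenance skipped */

	unmap_one(0x40001000ULL, 3, 1, &host_ops);	/* performs the clean */
	unmap_one(0x40001000ULL, 3, 1, &bare_ops);	/* silently skips it */
	return 0;
}

The NULL check is the point of the indirection: a caller that populates mm_ops without a dcache_clean_inval_poc callback simply gets no maintenance, while other callers can plug in whatever helper is appropriate for the context the page-table code runs in.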