int (*enable_svm)(struct kvm *kvm);
        int (*svm_off)(struct kvm *kvm);
        int (*enable_dawr1)(struct kvm *kvm);
+       bool (*hash_v3_possible)(void);
 };
 
 extern struct kvmppc_ops *kvmppc_hv_ops;
 
        return 0;
 }
 
+/*
+ * Report whether this host can run a hash-MMU (HPT) guest on an
+ * ISA v3.00 (POWER9 or later) CPU.
+ *
+ * If the host itself is running in radix mode and mixing HPT and
+ * radix threads on a core is not permitted (no_mixing_hpt_and_radix),
+ * a hash guest cannot be supported.  Otherwise, hash v3 guests are
+ * possible whenever the CPU implements ISA v3.00 and we are running
+ * in hypervisor mode.
+ *
+ * NOTE(review): exposed to generic code via the hash_v3_possible
+ * kvmppc_ops hook so KVM_CAP_PPC_MMU_HASH_V3 reflects the HV
+ * backend's actual capability rather than a hard-coded CPU check.
+ */
+static bool kvmppc_hash_v3_possible(void)
+{
+       if (radix_enabled() && no_mixing_hpt_and_radix)
+               return false;
+
+       return cpu_has_feature(CPU_FTR_ARCH_300) &&
+               cpu_has_feature(CPU_FTR_HVMODE);
+}
+
static struct kvmppc_ops kvm_ops_hv = {
        .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
        .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
        .enable_svm = kvmhv_enable_svm,
        .svm_off = kvmhv_svm_off,
        .enable_dawr1 = kvmhv_enable_dawr1,
+       /* Lets generic code query whether a hash (HPT) v3 guest can run. */
+       .hash_v3_possible = kvmppc_hash_v3_possible,
 };
 
 static int kvm_init_subcore_bitmap(void)
 
                r = !!(hv_enabled && radix_enabled());
                break;
        case KVM_CAP_PPC_MMU_HASH_V3:
-               r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
-                      cpu_has_feature(CPU_FTR_HVMODE));
+               r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
+                      kvmppc_hv_ops->hash_v3_possible());
                break;
        case KVM_CAP_PPC_NESTED_HV:
                r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&