        return alternative_has_cap_unlikely(ARM64_HAS_TLB_RANGE);
 }
 
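+/*
+ * Report whether every CPU in the system implements FEAT_LPA2. Only
+ * valid once the system-wide capabilities have been finalised.
+ */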
+static inline bool system_supports_lpa2(void)
+{
+       return cpus_have_final_cap(ARM64_HAS_LPA2);
+}
+
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
        return !meltdown_safe;
 }
 
+#if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2)
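+/*
+ * ID_AA64MMFR0_EL1_TGRAN{,_2}_* resolve to the fields for the configured
+ * page size. The LPA2 values only exist for the 4K and 16K granules, so
+ * 64K builds fall through to the stub below and never report LPA2.
+ */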
+static bool has_lpa2_at_stage1(u64 mmfr0)
+{
+       unsigned int tgran;
+
+       tgran = cpuid_feature_extract_unsigned_field(mmfr0,
+                                       ID_AA64MMFR0_EL1_TGRAN_SHIFT);
+       return tgran == ID_AA64MMFR0_EL1_TGRAN_LPA2;
+}
+
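+/* The TGRAN*_2 fields advertise stage 2 support for each granule. */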
+static bool has_lpa2_at_stage2(u64 mmfr0)
+{
+       unsigned int tgran;
+
+       tgran = cpuid_feature_extract_unsigned_field(mmfr0,
+                                       ID_AA64MMFR0_EL1_TGRAN_2_SHIFT);
+       return tgran == ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2;
+}
+
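+/*
+ * LPA2 is only usable if both stage 1 (kernel) and stage 2 (KVM guest)
+ * translation support it, so require both before reporting the cap.
+ */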
+static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
+{
+       u64 mmfr0;
+
+       mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+       return has_lpa2_at_stage1(mmfr0) && has_lpa2_at_stage2(mmfr0);
+}
+#else
+static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
+{
+       return false;
+}
+#endif
+
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 #define KPTI_NG_TEMP_VA                (-(1UL << PMD_SHIFT))
 
                .matches = has_cpuid_feature,
                ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
        },
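+       /* Detected system-wide from the sanitised view of ID_AA64MMFR0_EL1. */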
+       {
+               .desc = "52-bit Virtual Addressing for KVM (LPA2)",
+               .capability = ARM64_HAS_LPA2,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .matches = has_lpa2,
+       },
        {},
 };