From: Adrian Hunter <adrian.hunter@intel.com>
Date: Mon, 31 Jan 2022 07:24:50 +0000 (+0200)
Subject: x86: Share definition of __is_canonical_address()
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=1fb85d06ad6754796cd1b920639ca9d8840abefd;p=linux.git

x86: Share definition of __is_canonical_address()

Reduce code duplication by moving canonical address code to a common
header file.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220131072453.2839535-3-adrian.hunter@intel.com
---

diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 0ebcf9a56f97a..93676a5b66dba 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1350,20 +1350,10 @@ static void pt_addr_filters_fini(struct perf_event *event)
 }
 
 #ifdef CONFIG_X86_64
-static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
-static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return canonical_address(vaddr, vaddr_bits) == vaddr;
-}
-
 /* Clamp to a canonical address greater-than-or-equal-to the address given */
 static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
 {
-	return is_canonical_address(vaddr, vaddr_bits) ?
+	return __is_canonical_address(vaddr, vaddr_bits) ?
 	       vaddr :
 	       -BIT_ULL(vaddr_bits - 1);
 }
@@ -1371,7 +1361,7 @@ static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
 /* Clamp to a canonical address less-than-or-equal-to the address given */
 static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
 {
-	return is_canonical_address(vaddr, vaddr_bits) ?
+	return __is_canonical_address(vaddr, vaddr_bits) ?
 	       vaddr :
 	       BIT_ULL(vaddr_bits - 1) - 1;
 }
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 4d5810c8fab74..9cc82f305f4bf 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -71,6 +71,16 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 extern bool __virt_addr_valid(unsigned long kaddr);
 #define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
 
+static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
+}
+
+static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+	return __canonical_address(vaddr, vaddr_bits) == vaddr;
+}
+
 #endif	/* __ASSEMBLY__ */
 
 #include <asm-generic/memory_model.h>
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5719d8cfdbd90..40da8c7f3019e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -665,7 +665,7 @@ static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
 static inline bool emul_is_noncanonical_address(u64 la,
 						struct x86_emulate_ctxt *ctxt)
 {
-	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
+	return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
 }
 
 /*
@@ -715,7 +715,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 	case X86EMUL_MODE_PROT64:
 		*linear = la;
 		va_bits = ctxt_virt_addr_bits(ctxt);
-		if (get_canonical(la, va_bits) != la)
+		if (!__is_canonical_address(la, va_bits))
 			goto bad;
 
 		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 74b53a16f38a7..197209f456a64 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1735,7 +1735,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
 		 * value, and that something deterministic happens if the guest
 		 * invokes 64-bit SYSENTER.
 		 */
-		data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
+		data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
 		break;
 	case MSR_TSC_AUX:
 		if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 635b75f9e1454..fc4b68ab8d71d 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -211,14 +211,9 @@ static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
 	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
 }
 
-static inline u64 get_canonical(u64 la, u8 vaddr_bits)
-{
-	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
 static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
 {
-	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
+	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
 }
 
 static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
index 92ec176a72937..5a53c2cc169cc 100644
--- a/arch/x86/mm/maccess.c
+++ b/arch/x86/mm/maccess.c
@@ -4,11 +4,6 @@
 #include <linux/kernel.h>
 
 #ifdef CONFIG_X86_64
-static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
-{
-	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
-}
-
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 {
 	unsigned long vaddr = (unsigned long)unsafe_src;
@@ -19,7 +14,7 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 	 * we also need to include the userspace guard page.
 	 */
 	return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
-	       canonical_address(vaddr, boot_cpu_data.x86_virt_bits) == vaddr;
+	       __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
 }
 #else
 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
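
For reference, a minimal userspace sketch (not part of the patch; the helper
names here are local stand-ins, not kernel API) of the sign-extension trick
behind __canonical_address(): shifting the address left so that bit
(vaddr_bits - 1) lands in the sign bit, then arithmetic-shifting back down,
replicates that bit through bits 63:vaddr_bits, which is exactly the x86-64
canonical-address rule.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for the kernel's __canonical_address(). The left shift is done
 * on the unsigned value to avoid the signed-overflow UB that the kernel
 * tolerates under its own build flags; the arithmetic right shift on
 * int64_t sign-extends on gcc and clang.
 */
static inline uint64_t canonical(uint64_t vaddr, uint8_t vaddr_bits)
{
	return (uint64_t)((int64_t)(vaddr << (64 - vaddr_bits)) >>
			  (64 - vaddr_bits));
}

static inline int is_canonical(uint64_t vaddr, uint8_t vaddr_bits)
{
	return canonical(vaddr, vaddr_bits) == vaddr;
}

int main(void)
{
	/* 48-bit virtual addresses: bits 63:47 must all equal bit 47. */
	uint64_t lo  = 0x00007fffffffffffULL; /* top of lower half: canonical */
	uint64_t hi  = 0xffff800000000000ULL; /* base of upper half: canonical */
	uint64_t bad = 0x0000800000000000ULL; /* inside the non-canonical hole */

	printf("%d %d %d\n", is_canonical(lo, 48), is_canonical(hi, 48),
	       is_canonical(bad, 48));	/* prints: 1 1 0 */
	printf("%016llx\n", (unsigned long long)canonical(bad, 48));
	/* prints ffff800000000000: bit 47 was extended into bits 63:48 */
	return 0;
}

The same rule is why vcpu_virt_addr_bits() above returns 57 when CR4.LA57 is
set: with 5-level paging the canonical split moves from bit 47 to bit 56,
and nothing in the helpers needs to change.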