From: Linus Torvalds
Date: Thu, 13 Jan 2022 00:31:19 +0000 (-0800)
Subject: Merge tag 'x86_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git...
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=64ad9461521b1a357846ef6cedc4bccd48a046e0;p=linux.git

Merge tag 'x86_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 core updates from Borislav Petkov:

 - Get rid of all the .fixup sections, because they generate
   misleading/wrong stacktraces and confuse RELIABLE_STACKTRACE and
   LIVEPATCH, as the backtrace misses the function which is being fixed
   up (see the before/after sketch after the diffs below)

 - Add Straight Line Speculation mitigation support, which uses a new
   compiler switch, -mharden-sls=, to stick an INT3 after a RET or an
   indirect branch in order to block speculation after them.
   Reportedly, CPUs do speculate behind such insns (see the RET sketch
   after the diffs below)

 - The usual set of cleanups and improvements

* tag 'x86_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (32 commits)
  x86/entry_32: Fix segment exceptions
  objtool: Remove .fixup handling
  x86: Remove .fixup section
  x86/word-at-a-time: Remove .fixup usage
  x86/usercopy: Remove .fixup usage
  x86/usercopy_32: Simplify __copy_user_intel_nocache()
  x86/sgx: Remove .fixup usage
  x86/checksum_32: Remove .fixup usage
  x86/vmx: Remove .fixup usage
  x86/kvm: Remove .fixup usage
  x86/segment: Remove .fixup usage
  x86/fpu: Remove .fixup usage
  x86/xen: Remove .fixup usage
  x86/uaccess: Remove .fixup usage
  x86/futex: Remove .fixup usage
  x86/msr: Remove .fixup usage
  x86/extable: Extend extable functionality
  x86/entry_32: Remove .fixup usage
  x86/entry_64: Remove .fixup usage
  x86/copy_mc_64: Remove .fixup usage
  ...
---

64ad9461521b1a357846ef6cedc4bccd48a046e0
diff --cc arch/x86/lib/copy_user_64.S
index a2cbeae4b180d,e6ac38587b40d..8ca5ecf16dc47
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@@ -200,19 -190,16 +190,16 @@@ EXPORT_SYMBOL(copy_user_generic_string
   */
  SYM_FUNC_START(copy_user_enhanced_fast_string)
  	ASM_STAC
- 	cmpl $64,%edx
- 	jb .L_copy_short_string	/* less than 64 bytes, avoid the costly 'rep' */
+ 	/* CPUs without FSRM should avoid rep movsb for short copies */
+ 	ALTERNATIVE "cmpl $64, %edx; jb .L_copy_short_string", "", X86_FEATURE_FSRM
  	movl %edx,%ecx
- 1:	rep
- 	movsb
+ 1:	rep movsb
  	xorl %eax,%eax
  	ASM_CLAC
- 	ret
+ 	RET
  
- 	.section .fixup,"ax"
  12:	movl %ecx,%edx		/* ecx is zerorest also */
  	jmp .Lcopy_user_handle_tail
- 	.previous
  
  	_ASM_EXTABLE_CPY(1b, 12b)
  SYM_FUNC_END(copy_user_enhanced_fast_string)
@@@ -241,14 -224,14 +228,19 @@@ SYM_CODE_START_LOCAL(.Lcopy_user_handle
  1:	rep movsb
  2:	mov %ecx,%eax
  	ASM_CLAC
- 	ret
+ 	RET
  
 +3:
 +	movl %edx,%eax
 +	ASM_CLAC
 +	RET
 +
  	_ASM_EXTABLE_CPY(1b, 2b)
+ 
+ .Lcopy_user_handle_align:
+ 	addl %ecx,%edx			/* ecx is zerorest also */
+ 	jmp .Lcopy_user_handle_tail
+ 
  SYM_CODE_END(.Lcopy_user_handle_tail)
  
  /*
diff --cc arch/x86/platform/efi/efi_thunk_64.S
index 5b7c6e09954ec,f2a8eec69f8f6..25799d7686240
--- a/arch/x86/platform/efi/efi_thunk_64.S
+++ b/arch/x86/platform/efi/efi_thunk_64.S
@@@ -70,10 -60,10 +70,10 @@@ SYM_CODE_START(__efi64_thunk
  	pushq	%rdi			/* EFI runtime service address */
  	lretq
  
 -1:	movq	24(%rsp), %rsp
 +1:	movq	0x20(%rsp), %rsp
  	pop	%rbx
  	pop	%rbp
- 	retq
+ 	RET
  
  	.code32
  2:	pushl	$__KERNEL_CS
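
Background on the .fixup removal: a faulting instruction used to be paired,
via the exception table, with recovery code assembled into the separate
.fixup section. Because that recovery code sits outside the function, a
backtrace taken from it misses the function being fixed up, which is what
confuses RELIABLE_STACKTRACE and LIVEPATCH. The series instead encodes the
fixup action in the exception-table entry itself. A minimal before/after
sketch, assuming the _ASM_EXTABLE_TYPE_REG macro and EX_TYPE_EFAULT_REG
type added by "x86/extable: Extend extable functionality"; this is
illustrative, not an excerpt from the series:

	/* Before: recovery code lives out of line in .fixup. */
	1:	movl (%rsi), %eax	/* may fault on a user pointer */
		.section .fixup, "ax"
	3:	movl $-14, %eax		/* -EFAULT */
		jmp 2f
		.previous
		_ASM_EXTABLE(1b, 3b)
	2:

	/* After: the extable entry encodes the fixup directly: on a
	 * fault at 1b, set %eax to -EFAULT and resume at 2b. No code
	 * exists outside the function, so the unwinder is not misled. */
	1:	movl (%rsi), %eax
	2:
		_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %eax)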
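
Background on the SLS mitigation: the RET macro seen throughout the diffs
is the assembly-side counterpart of -mharden-sls=. A minimal sketch of the
idea, following the arch/x86/include/asm/linkage.h change in this series
(treat the exact config guard and spelling as an assumption):

	/* With CONFIG_SLS, every return is followed by an INT3, so a CPU
	 * speculating straight past the RET hits a trapping instruction
	 * instead of decoding whatever bytes happen to follow. */
	#ifdef CONFIG_SLS
	#define RET	ret; int3
	#else
	#define RET	ret
	#endif

For compiled code, -mharden-sls= makes the compiler emit the same INT3
after returns and indirect branches; the series also teaches objtool to
flag any that are left unprotected.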
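
Background on the ALTERNATIVE line in the copy_user_64.S hunk:
ALTERNATIVE "old", "new", feature assembles the old instructions and
patches them with the new ones at boot on CPUs that advertise the feature
bit; an empty replacement patches the old instructions out. Here, CPUs
with Fast Short REP MOVSB (FSRM) drop the short-copy bypass because rep
movsb is fast on them even below 64 bytes. A sketch of the pattern (the
.Lshort_copy label is hypothetical):

	/* Default: branch to a short-copy routine for < 64 bytes. On
	 * FSRM CPUs the cmpl/jb pair is patched to NOPs at boot, so
	 * execution falls straight through to rep movsb. */
		ALTERNATIVE "cmpl $64, %edx; jb .Lshort_copy", "", X86_FEATURE_FSRM
		movl	%edx, %ecx
		rep movsb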