x86/asm: Make some functions local labels
author     Jiri Slaby <jslaby@suse.cz>
           Fri, 6 Sep 2019 07:55:50 +0000 (09:55 +0200)
committer  Borislav Petkov <bp@suse.de>
           Fri, 6 Sep 2019 08:41:11 +0000 (10:41 +0200)
Boris suggests turning these functions into local labels (prepending ".L" to
their names) so that they are eliminated from the symbol table. These functions
have very local names and really should not be visible anywhere.
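
As a minimal sketch of the mechanism ("relocated_demo" is a made-up name, not
one of the labels touched by this patch): a plain label is emitted as a local
symbol in the object file and shows up in "nm", whereas a ".L"-prefixed label
is assembler-internal and produces no symbol at all:

	/* plain label: "nm file.o" lists it as a local text symbol */
	relocated_demo:
		ret

	/* ".L" label: assembler-local, never reaches the symbol table */
	.Lrelocated_demo:
		ret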

Note that objtool will no longer see these functions (to generate ORC debug
info for them). But none of these functions is annotated with ENDPROC, so they
would not get objtool's attention anyway.
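
For context, a rough sketch of what that note relies on ("example_func" and
".Lexample_helper" are made-up names): objtool keys off function symbols, which
an ENTRY/ENDPROC pair provides and a bare ".L" label never does:

	/* annotated: global STT_FUNC symbol, so objtool can emit ORC data for it */
	ENTRY(example_func)
		ret
	ENDPROC(example_func)

	/* bare local label: no symbol at all, so objtool never considers it */
	.Lexample_helper:
		ret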

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Cao jin <caoj.fnst@cn.fujitsu.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steve Winslow <swinslow@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wei Huang <wei@redhat.com>
Cc: x86-ml <x86@kernel.org>
Cc: Xiaoyao Li <xiaoyao.li@linux.intel.com>
Link: https://lkml.kernel.org/r/20190906075550.23435-2-jslaby@suse.cz
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/entry/entry_64.S
arch/x86/lib/copy_user_64.S
arch/x86/lib/getuser.S
arch/x86/lib/putuser.S

diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 37380c0d59996b8ed688d0e350111343e5dca0ae..5e30eaaf8576fb7ce0cca4c94fa931888c812101 100644
@@ -140,7 +140,7 @@ ENTRY(startup_32)
 /*
  * Jump to the relocated address.
  */
-       leal    relocated(%ebx), %eax
+       leal    .Lrelocated(%ebx), %eax
        jmp     *%eax
 ENDPROC(startup_32)
 
@@ -209,7 +209,7 @@ ENDPROC(efi32_stub_entry)
 #endif
 
        .text
-relocated:
+.Lrelocated:
 
 /*
  * Clear BSS (stack is currently empty)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 6233ae35d0d9a9d6682be147a1d3f305c59b3e30..d98cd483377eb7461d99ba563c7aaac466674466 100644
@@ -87,7 +87,7 @@ ENTRY(startup_32)
 
        call    verify_cpu
        testl   %eax, %eax
-       jnz     no_longmode
+       jnz     .Lno_longmode
 
 /*
  * Compute the delta between where we were compiled to run at
@@ -322,7 +322,7 @@ ENTRY(startup_64)
 1:     popq    %rdi
        subq    $1b, %rdi
 
-       call    adjust_got
+       call    .Ladjust_got
 
        /*
         * At this point we are in long mode with 4-level paging enabled,
@@ -421,7 +421,7 @@ trampoline_return:
 
        /* The new adjustment is the relocation address */
        movq    %rbx, %rdi
-       call    adjust_got
+       call    .Ladjust_got
 
 /*
  * Copy the compressed kernel to the end of our buffer
@@ -440,7 +440,7 @@ trampoline_return:
 /*
  * Jump to the relocated address.
  */
-       leaq    relocated(%rbx), %rax
+       leaq    .Lrelocated(%rbx), %rax
        jmp     *%rax
 
 #ifdef CONFIG_EFI_STUB
@@ -511,7 +511,7 @@ ENDPROC(efi64_stub_entry)
 #endif
 
        .text
-relocated:
+.Lrelocated:
 
 /*
  * Clear BSS (stack is currently empty)
@@ -548,7 +548,7 @@ relocated:
  * first time we touch GOT).
  * RDI is the new adjustment to apply.
  */
-adjust_got:
+.Ladjust_got:
        /* Walk through the GOT adding the address to the entries */
        leaq    _got(%rip), %rdx
        leaq    _egot(%rip), %rcx
@@ -622,7 +622,7 @@ ENTRY(trampoline_32bit_src)
        movl    %eax, %cr4
 
        /* Calculate address of paging_enabled() once we are executing in the trampoline */
-       leal    paging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
+       leal    .Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
 
        /* Prepare the stack for far return to Long Mode */
        pushl   $__KERNEL_CS
@@ -635,7 +635,7 @@ ENTRY(trampoline_32bit_src)
        lret
 
        .code64
-paging_enabled:
+.Lpaging_enabled:
        /* Return from the trampoline */
        jmp     *%rdi
 
@@ -647,7 +647,7 @@ paging_enabled:
        .org    trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
 
        .code32
-no_longmode:
+.Lno_longmode:
        /* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
 1:
        hlt
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index be9ca198c581aea7ed29f4417aae9c1c1b835473..cf273242691bb3d0f42aa14696b7e4faafd5b10d 100644
@@ -1058,10 +1058,10 @@ ENTRY(native_load_gs_index)
 ENDPROC(native_load_gs_index)
 EXPORT_SYMBOL(native_load_gs_index)
 
-       _ASM_EXTABLE(.Lgs_change, bad_gs)
+       _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
        .section .fixup, "ax"
        /* running with kernelgs */
-bad_gs:
+.Lbad_gs:
        SWAPGS                                  /* switch back to user gs */
 .macro ZAP_GS
        /* This can't be a string because the preprocessor needs to see it. */
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 4fe1601dbc5d857dfb3639600629584fd8f9c15b..86976b55ae743ef3b534d05475e2dcced6241962 100644
@@ -33,7 +33,7 @@
 102:
        .section .fixup,"ax"
 103:   addl %ecx,%edx                  /* ecx is zerorest also */
-       jmp copy_user_handle_tail
+       jmp .Lcopy_user_handle_tail
        .previous
 
        _ASM_EXTABLE_UA(100b, 103b)
@@ -113,7 +113,7 @@ ENTRY(copy_user_generic_unrolled)
 40:    leal (%rdx,%rcx,8),%edx
        jmp 60f
 50:    movl %ecx,%edx
-60:    jmp copy_user_handle_tail /* ecx is zerorest also */
+60:    jmp .Lcopy_user_handle_tail /* ecx is zerorest also */
        .previous
 
        _ASM_EXTABLE_UA(1b, 30b)
@@ -177,7 +177,7 @@ ENTRY(copy_user_generic_string)
        .section .fixup,"ax"
 11:    leal (%rdx,%rcx,8),%ecx
 12:    movl %ecx,%edx          /* ecx is zerorest also */
-       jmp copy_user_handle_tail
+       jmp .Lcopy_user_handle_tail
        .previous
 
        _ASM_EXTABLE_UA(1b, 11b)
@@ -210,7 +210,7 @@ ENTRY(copy_user_enhanced_fast_string)
 
        .section .fixup,"ax"
 12:    movl %ecx,%edx          /* ecx is zerorest also */
-       jmp copy_user_handle_tail
+       jmp .Lcopy_user_handle_tail
        .previous
 
        _ASM_EXTABLE_UA(1b, 12b)
@@ -231,7 +231,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
  * eax uncopied bytes or 0 if successful.
  */
 ALIGN;
-copy_user_handle_tail:
+.Lcopy_user_handle_tail:
        movl %edx,%ecx
 1:     rep movsb
 2:     mov %ecx,%eax
@@ -239,7 +239,7 @@ copy_user_handle_tail:
        ret
 
        _ASM_EXTABLE_UA(1b, 2b)
-END(copy_user_handle_tail)
+END(.Lcopy_user_handle_tail)
 
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
@@ -364,7 +364,7 @@ ENTRY(__copy_user_nocache)
        movl %ecx,%edx
 .L_fixup_handle_tail:
        sfence
-       jmp copy_user_handle_tail
+       jmp .Lcopy_user_handle_tail
        .previous
 
        _ASM_EXTABLE_UA(1b, .L_fixup_4x8b_copy)
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 304f958c27b29cd54aed4f736e5e4601bce5ae94..9578eb88fc878ce265defa0eb89d8ca318c1248a 100644
@@ -115,7 +115,7 @@ ENDPROC(__get_user_8)
 EXPORT_SYMBOL(__get_user_8)
 
 
-bad_get_user_clac:
+.Lbad_get_user_clac:
        ASM_CLAC
 bad_get_user:
        xor %edx,%edx
@@ -123,7 +123,7 @@ bad_get_user:
        ret
 
 #ifdef CONFIG_X86_32
-bad_get_user_8_clac:
+.Lbad_get_user_8_clac:
        ASM_CLAC
 bad_get_user_8:
        xor %edx,%edx
@@ -132,12 +132,12 @@ bad_get_user_8:
        ret
 #endif
 
-       _ASM_EXTABLE_UA(1b, bad_get_user_clac)
-       _ASM_EXTABLE_UA(2b, bad_get_user_clac)
-       _ASM_EXTABLE_UA(3b, bad_get_user_clac)
+       _ASM_EXTABLE_UA(1b, .Lbad_get_user_clac)
+       _ASM_EXTABLE_UA(2b, .Lbad_get_user_clac)
+       _ASM_EXTABLE_UA(3b, .Lbad_get_user_clac)
 #ifdef CONFIG_X86_64
-       _ASM_EXTABLE_UA(4b, bad_get_user_clac)
+       _ASM_EXTABLE_UA(4b, .Lbad_get_user_clac)
 #else
-       _ASM_EXTABLE_UA(4b, bad_get_user_8_clac)
-       _ASM_EXTABLE_UA(5b, bad_get_user_8_clac)
+       _ASM_EXTABLE_UA(4b, .Lbad_get_user_8_clac)
+       _ASM_EXTABLE_UA(5b, .Lbad_get_user_8_clac)
 #endif
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 14bf78341d3c731ab3d3e242003c21dbd8c33a50..126dd6a9ec9b54c69e7ab2e25ee6ecae2151ee34 100644
@@ -37,7 +37,7 @@
 ENTRY(__put_user_1)
        ENTER
        cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX
-       jae bad_put_user
+       jae .Lbad_put_user
        ASM_STAC
 1:     movb %al,(%_ASM_CX)
        xor %eax,%eax
@@ -51,7 +51,7 @@ ENTRY(__put_user_2)
        mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
        sub $1,%_ASM_BX
        cmp %_ASM_BX,%_ASM_CX
-       jae bad_put_user
+       jae .Lbad_put_user
        ASM_STAC
 2:     movw %ax,(%_ASM_CX)
        xor %eax,%eax
@@ -65,7 +65,7 @@ ENTRY(__put_user_4)
        mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
        sub $3,%_ASM_BX
        cmp %_ASM_BX,%_ASM_CX
-       jae bad_put_user
+       jae .Lbad_put_user
        ASM_STAC
 3:     movl %eax,(%_ASM_CX)
        xor %eax,%eax
@@ -79,7 +79,7 @@ ENTRY(__put_user_8)
        mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
        sub $7,%_ASM_BX
        cmp %_ASM_BX,%_ASM_CX
-       jae bad_put_user
+       jae .Lbad_put_user
        ASM_STAC
 4:     mov %_ASM_AX,(%_ASM_CX)
 #ifdef CONFIG_X86_32
@@ -91,16 +91,16 @@ ENTRY(__put_user_8)
 ENDPROC(__put_user_8)
 EXPORT_SYMBOL(__put_user_8)
 
-bad_put_user_clac:
+.Lbad_put_user_clac:
        ASM_CLAC
-bad_put_user:
+.Lbad_put_user:
        movl $-EFAULT,%eax
        RET
 
-       _ASM_EXTABLE_UA(1b, bad_put_user_clac)
-       _ASM_EXTABLE_UA(2b, bad_put_user_clac)
-       _ASM_EXTABLE_UA(3b, bad_put_user_clac)
-       _ASM_EXTABLE_UA(4b, bad_put_user_clac)
+       _ASM_EXTABLE_UA(1b, .Lbad_put_user_clac)
+       _ASM_EXTABLE_UA(2b, .Lbad_put_user_clac)
+       _ASM_EXTABLE_UA(3b, .Lbad_put_user_clac)
+       _ASM_EXTABLE_UA(4b, .Lbad_put_user_clac)
 #ifdef CONFIG_X86_32
-       _ASM_EXTABLE_UA(5b, bad_put_user_clac)
+       _ASM_EXTABLE_UA(5b, .Lbad_put_user_clac)
 #endif