x86/asm: Do not annotate functions with GLOBAL
authorJiri Slaby <jslaby@suse.cz>
Fri, 11 Oct 2019 11:50:56 +0000 (13:50 +0200)
committerBorislav Petkov <bp@suse.de>
Fri, 18 Oct 2019 09:25:58 +0000 (11:25 +0200)
GLOBAL is an x86-specific custom macro and is going to die very soon. It was
meant for global symbols, but here, it was used for functions. Instead,
use the new macros SYM_FUNC_START* and SYM_CODE_START* (depending on the
type of the function) which are dedicated to global functions. And since
they both require a closing by SYM_*_END, do that here too.

startup_64, which does not use GLOBAL but uses .globl explicitly, is
converted too.

"No alignments" are preserved.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Allison Randal <allison@lohutok.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Cao jin <caoj.fnst@cn.fujitsu.com>
Cc: Enrico Weigelt <info@metux.net>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: linux-arch@vger.kernel.org
Cc: Maran Wilson <maran.wilson@oracle.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20191011115108.12392-17-jslaby@suse.cz
arch/x86/boot/copy.S
arch/x86/boot/pmjump.S
arch/x86/kernel/head_64.S

index 4c5f4f4ad035ef3d5ee939d570baf8d8b442e07d..6afd05e819d26831d739bb6c6aba27f4ba0c2137 100644 (file)
@@ -15,7 +15,7 @@
        .code16
        .text
 
-GLOBAL(memcpy)
+SYM_FUNC_START_NOALIGN(memcpy)
        pushw   %si
        pushw   %di
        movw    %ax, %di
@@ -29,9 +29,9 @@ GLOBAL(memcpy)
        popw    %di
        popw    %si
        retl
-ENDPROC(memcpy)
+SYM_FUNC_END(memcpy)
 
-GLOBAL(memset)
+SYM_FUNC_START_NOALIGN(memset)
        pushw   %di
        movw    %ax, %di
        movzbl  %dl, %eax
@@ -44,22 +44,22 @@ GLOBAL(memset)
        rep; stosb
        popw    %di
        retl
-ENDPROC(memset)
+SYM_FUNC_END(memset)
 
-GLOBAL(copy_from_fs)
+SYM_FUNC_START_NOALIGN(copy_from_fs)
        pushw   %ds
        pushw   %fs
        popw    %ds
        calll   memcpy
        popw    %ds
        retl
-ENDPROC(copy_from_fs)
+SYM_FUNC_END(copy_from_fs)
 
-GLOBAL(copy_to_fs)
+SYM_FUNC_START_NOALIGN(copy_to_fs)
        pushw   %es
        pushw   %fs
        popw    %es
        calll   memcpy
        popw    %es
        retl
-ENDPROC(copy_to_fs)
+SYM_FUNC_END(copy_to_fs)
index 81658fe353808738bb2eb0996a61d0a520bc66ab..cbec8bd0841fa7f35b4a8514496ad83ac1c0473b 100644 (file)
@@ -21,7 +21,7 @@
 /*
  * void protected_mode_jump(u32 entrypoint, u32 bootparams);
  */
-GLOBAL(protected_mode_jump)
+SYM_FUNC_START_NOALIGN(protected_mode_jump)
        movl    %edx, %esi              # Pointer to boot_params table
 
        xorl    %ebx, %ebx
@@ -42,7 +42,7 @@ GLOBAL(protected_mode_jump)
        .byte   0x66, 0xea              # ljmpl opcode
 2:     .long   .Lin_pm32               # offset
        .word   __BOOT_CS               # segment
-ENDPROC(protected_mode_jump)
+SYM_FUNC_END(protected_mode_jump)
 
        .code32
        .section ".text32","ax"
index 8b0926ac4ac6a9131f0816b6c18b1cf93640fb42..10f306e312441c42e68ecbd60e4440fcd5449848 100644 (file)
@@ -49,8 +49,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map)
        .text
        __HEAD
        .code64
-       .globl startup_64
-startup_64:
+SYM_CODE_START_NOALIGN(startup_64)
        UNWIND_HINT_EMPTY
        /*
         * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
@@ -90,6 +89,8 @@ startup_64:
        /* Form the CR3 value being sure to include the CR3 modifier */
        addq    $(early_top_pgt - __START_KERNEL_map), %rax
        jmp 1f
+SYM_CODE_END(startup_64)
+
 ENTRY(secondary_startup_64)
        UNWIND_HINT_EMPTY
        /*