powerpc/vdso: Use builtin symbols to locate fixup section
author    Christophe Leroy <christophe.leroy@csgroup.eu>
          Sun, 27 Sep 2020 09:16:34 +0000 (09:16 +0000)
committer Michael Ellerman <mpe@ellerman.id.au>
          Thu, 3 Dec 2020 14:01:17 +0000 (01:01 +1100)
Add builtin symbols to locate the fixup sections and use them
instead of locating the sections through ELF headers at runtime.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/2954526981859ca1ccfcfc7a7c4263920e9ddfcb.1601197618.git.christophe.leroy@csgroup.eu
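
For context, this change relies on the standard "linker-provided symbols" pattern:
the linker script defines a symbol at the current location counter on each side of
an output section, and C code takes the symbols' addresses to bound that section
without parsing ELF headers at runtime. A minimal sketch under illustrative names
(the real code below goes through the VDSO32_SYMBOL()/VDSO64_SYMBOL() helpers
rather than raw externs; __example_fixup_* and apply_example_fixups() are not
symbols added by this patch):

	/* Linker script side (illustrative):
	 *	__example_fixup_start = .;
	 *	.example_fixup : { *(.example_fixup) }
	 *	__example_fixup_end = .;
	 */
	extern char __example_fixup_start[];	/* address = start of the section */
	extern char __example_fixup_end[];	/* address = end of the section */

	static void apply_example_fixups(void)
	{
		/* The symbol addresses delimit the section directly, so no
		 * ELF section-header lookup is needed at runtime.
		 */
		void *start = __example_fixup_start;
		void *end = __example_fixup_end;

		/* ... walk the entries between start and end and patch them ... */
	}
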
arch/powerpc/kernel/vdso.c
arch/powerpc/kernel/vdso32/vdso32.lds.S
arch/powerpc/kernel/vdso64/vdso64.lds.S

diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index e10bc0d9856cc9a9d06908d0335f9aeb24aaffca..27449202c1d74ad14ff6e41dc581c41ede5b4579 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -434,6 +434,12 @@ static int __init vdso_do_func_patch64(struct lib32_elfinfo *v32,
 
 #endif /* CONFIG_PPC64 */
 
+#define VDSO_DO_FIXUPS(type, value, bits, sec) do {                                    \
+       void *__start = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_start);  \
+       void *__end = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_end);      \
+                                                                                       \
+       do_##type##_fixups((value), __start, __end);                                    \
+} while (0)
 
 static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
                                        struct lib64_elfinfo *v64)
@@ -530,53 +536,20 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
 static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
                                      struct lib64_elfinfo *v64)
 {
-       unsigned long size;
-       void *start;
-
 #ifdef CONFIG_PPC64
-       start = find_section64(v64->hdr, "__ftr_fixup", &size);
-       if (start)
-               do_feature_fixups(cur_cpu_spec->cpu_features,
-                                 start, start + size);
-
-       start = find_section64(v64->hdr, "__mmu_ftr_fixup", &size);
-       if (start)
-               do_feature_fixups(cur_cpu_spec->mmu_features,
-                                 start, start + size);
-
-       start = find_section64(v64->hdr, "__fw_ftr_fixup", &size);
-       if (start)
-               do_feature_fixups(powerpc_firmware_features,
-                                 start, start + size);
-
-       start = find_section64(v64->hdr, "__lwsync_fixup", &size);
-       if (start)
-               do_lwsync_fixups(cur_cpu_spec->cpu_features,
-                                start, start + size);
+       VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup);
+       VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 64, mmu_ftr_fixup);
+       VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 64, fw_ftr_fixup);
+       VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 64, lwsync_fixup);
 #endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_VDSO32
-       start = find_section32(v32->hdr, "__ftr_fixup", &size);
-       if (start)
-               do_feature_fixups(cur_cpu_spec->cpu_features,
-                                 start, start + size);
-
-       start = find_section32(v32->hdr, "__mmu_ftr_fixup", &size);
-       if (start)
-               do_feature_fixups(cur_cpu_spec->mmu_features,
-                                 start, start + size);
-
+       VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 32, ftr_fixup);
+       VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 32, mmu_ftr_fixup);
 #ifdef CONFIG_PPC64
-       start = find_section32(v32->hdr, "__fw_ftr_fixup", &size);
-       if (start)
-               do_feature_fixups(powerpc_firmware_features,
-                                 start, start + size);
+       VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 32, fw_ftr_fixup);
 #endif /* CONFIG_PPC64 */
-
-       start = find_section32(v32->hdr, "__lwsync_fixup", &size);
-       if (start)
-               do_lwsync_fixups(cur_cpu_spec->cpu_features,
-                                start, start + size);
+       VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 32, lwsync_fixup);
 #endif
 
        return 0;
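
For readability, here is how one of the new invocations expands by hand. Only
names visible in the macro above are used; the VDSO64_SYMBOL() helper and the
vdso64_start symbol it pastes together are assumed to be the ones already
provided by the powerpc vdso code, and ftr_fixup_start/ftr_fixup_end are
expected to resolve to the VDSO_ftr_fixup_start/VDSO_ftr_fixup_end symbols
added to the linker scripts below:

	/* VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup)
	 * becomes roughly:
	 */
	do {
		void *__start = (void *)VDSO64_SYMBOL(&vdso64_start, ftr_fixup_start);
		void *__end = (void *)VDSO64_SYMBOL(&vdso64_start, ftr_fixup_end);

		do_feature_fixups(cur_cpu_spec->cpu_features, __start, __end);
	} while (0);
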
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 078d75c0cd249d188b6dd21bd3d974a546966c7f..dc62772f028cef4948e4392890d160390514ba06 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -38,17 +38,25 @@ SECTIONS
        PROVIDE(etext = .);
 
        . = ALIGN(8);
+       VDSO_ftr_fixup_start = .;
        __ftr_fixup     : { *(__ftr_fixup) }
+       VDSO_ftr_fixup_end = .;
 
        . = ALIGN(8);
+       VDSO_mmu_ftr_fixup_start = .;
        __mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
+       VDSO_mmu_ftr_fixup_end = .;
 
        . = ALIGN(8);
+       VDSO_lwsync_fixup_start = .;
        __lwsync_fixup  : { *(__lwsync_fixup) }
+       VDSO_lwsync_fixup_end = .;
 
 #ifdef CONFIG_PPC64
        . = ALIGN(8);
+       VDSO_fw_ftr_fixup_start = .;
        __fw_ftr_fixup  : { *(__fw_ftr_fixup) }
+       VDSO_fw_ftr_fixup_end = .;
 #endif
 
        /*
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 1f06e4f730a8e4635dc6f15d8034a93d2d811ec0..913d34e8bd052411edcf2be5651d6982db597038 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -39,16 +39,24 @@ SECTIONS
        PROVIDE(etext = .);
 
        . = ALIGN(8);
+       VDSO_ftr_fixup_start = .;
        __ftr_fixup     : { *(__ftr_fixup) }
+       VDSO_ftr_fixup_end = .;
 
        . = ALIGN(8);
+       VDSO_mmu_ftr_fixup_start = .;
        __mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
+       VDSO_mmu_ftr_fixup_end = .;
 
        . = ALIGN(8);
+       VDSO_lwsync_fixup_start = .;
        __lwsync_fixup  : { *(__lwsync_fixup) }
+       VDSO_lwsync_fixup_end = .;
 
        . = ALIGN(8);
+       VDSO_fw_ftr_fixup_start = .;
        __fw_ftr_fixup  : { *(__fw_ftr_fixup) }
+       VDSO_fw_ftr_fixup_end = .;
 
        /*
         * Other stuff is appended to the text segment: