arm64: use mov_q instead of literal ldr
author Remi Denis-Courmont <remi.denis.courmont@huawei.com>
Wed, 4 Mar 2020 09:36:31 +0000 (11:36 +0200)
committer Catalin Marinas <catalin.marinas@arm.com>
Tue, 24 Mar 2020 11:48:24 +0000 (11:48 +0000)
In practice, replacing a literal-pool ldr with the mov_q macro requires
only 2 instructions, or even only 1 for the idmap_pg_dir size (with 4 or
64 KiB pages). Only the MAIR values needed more than 2 instructions, and
those were already converted to mov_q by
commit 95b3f74bec203804658e17f86fe20755bb8abcb9.
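
As a rough illustration (the 32-bit constant below is made up and is not
one of the values touched by this patch), the two forms compare roughly
as follows; the exact expansion is picked by the mov_q assembler macro:

	/* Literal-pool form: a PC-relative load plus an 8-byte pool entry. */
	ldr	x0, =0x0020100f		// value fetched from a .ltorg pool at run time

	/* mov_q form: the value is built with move-immediates, no pool needed. */
	movz	x0, #0x0020, lsl #16
	movk	x0, #0x100f

With no "ldr reg, =expr" left in relocate_kernel.S and hyp-init.S, their
now-unused .ltorg literal pools are dropped as well.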

Signed-off-by: Remi Denis-Courmont <remi.denis.courmont@huawei.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
arch/arm64/kernel/cpu-reset.S
arch/arm64/kernel/hyp-stub.S
arch/arm64/kernel/relocate_kernel.S
arch/arm64/kvm/hyp-init.S
arch/arm64/mm/proc.S

diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 32c7bf858dd9af47ccc46bba98f10674ce84046a..38087b4c04322414d59e9649bef7ec0bef71230b 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -32,7 +32,7 @@
 ENTRY(__cpu_soft_restart)
        /* Clear sctlr_el1 flags. */
        mrs     x12, sctlr_el1
-       ldr     x13, =SCTLR_ELx_FLAGS
+       mov_q   x13, SCTLR_ELx_FLAGS
        bic     x12, x12, x13
        pre_disable_mmu_workaround
        msr     sctlr_el1, x12
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 73d46070b31500117ade72ea1d2a6c2ed31feb2b..e473ead806ed0953f9ebeb3879e9e8af3822b548 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -63,7 +63,7 @@ el1_sync:
        beq     9f                              // Nothing to reset!
 
        /* Someone called kvm_call_hyp() against the hyp-stub... */
-       ldr     x0, =HVC_STUB_ERR
+       mov_q   x0, HVC_STUB_ERR
        eret
 
 9:     mov     x0, xzr
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index c1d7db71a7269c622c11f181ec5929a5fa351a7f..c40ce496c78b0ea2445b8aadd94ceb9aaaae7452 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -41,7 +41,7 @@ ENTRY(arm64_relocate_new_kernel)
        cmp     x0, #CurrentEL_EL2
        b.ne    1f
        mrs     x0, sctlr_el2
-       ldr     x1, =SCTLR_ELx_FLAGS
+       mov_q   x1, SCTLR_ELx_FLAGS
        bic     x0, x0, x1
        pre_disable_mmu_workaround
        msr     sctlr_el2, x0
@@ -113,8 +113,6 @@ ENTRY(arm64_relocate_new_kernel)
 
 ENDPROC(arm64_relocate_new_kernel)
 
-.ltorg
-
 .align 3       /* To keep the 64-bit values below naturally aligned. */
 
 .Lcopy_end:
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 84f32cf5abc7e4425e165a1ad6897b5c282d2fb9..6e6ed5581eed157220a1b1ffcd660da21f38d62e 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -60,7 +60,7 @@ alternative_else_nop_endif
        msr     ttbr0_el2, x4
 
        mrs     x4, tcr_el1
-       ldr     x5, =TCR_EL2_MASK
+       mov_q   x5, TCR_EL2_MASK
        and     x4, x4, x5
        mov     x5, #TCR_EL2_RES1
        orr     x4, x4, x5
@@ -102,7 +102,7 @@ alternative_else_nop_endif
         * as well as the EE bit on BE. Drop the A flag since the compiler
         * is allowed to generate unaligned accesses.
         */
-       ldr     x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+       mov_q   x4, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
 CPU_BE(        orr     x4, x4, #SCTLR_ELx_EE)
        msr     sctlr_el2, x4
        isb
@@ -142,7 +142,7 @@ reset:
         * case we coming via HVC_SOFT_RESTART.
         */
        mrs     x5, sctlr_el2
-       ldr     x6, =SCTLR_ELx_FLAGS
+       mov_q   x6, SCTLR_ELx_FLAGS
        bic     x5, x5, x6              // Clear SCTL_M and etc
        pre_disable_mmu_workaround
        msr     sctlr_el2, x5
@@ -155,11 +155,9 @@ reset:
        eret
 
 1:     /* Bad stub call */
-       ldr     x0, =HVC_STUB_ERR
+       mov_q   x0, HVC_STUB_ERR
        eret
 
 SYM_CODE_END(__kvm_handle_stub_hvc)
 
-       .ltorg
-
        .popsection
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index aafed690241147f661b000532d4d0b788b37b04d..eb2ad57538873ef878323b76aeccab646606cf2e 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -436,7 +436,7 @@ SYM_FUNC_START(__cpu_setup)
         * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
         * both user and kernel.
         */
-       ldr     x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+       mov_q   x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
                        TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
                        TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
        tcr_clear_errata_bits x10, x9, x5