From: Huacai Chen
Date: Sat, 25 Feb 2023 07:52:56 +0000 (+0800)
Subject: LoongArch: Make -mstrict-align configurable
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=41596803302d83a67a80dc1efef4e51ac46acabb;p=linux.git

LoongArch: Make -mstrict-align configurable

Introduce the Kconfig option ARCH_STRICT_ALIGN to make -mstrict-align
configurable.

Not all LoongArch cores support h/w unaligned access, so the
-mstrict-align build parameter can be used to prevent unaligned
accesses.

CPUs with h/w unaligned access support:
Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.

CPUs without h/w unaligned access support: Loongson-2K500/2K1000.

This option is enabled by default so that the kernel can run on all
LoongArch systems. But you can disable it manually if you want to run
the kernel only on systems with h/w unaligned access support, in order
to optimise for performance.

Reviewed-by: Arnd Bergmann
Signed-off-by: Huacai Chen
---

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 9cc8b84f7eb03..0c1c6063cc661 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -94,6 +94,7 @@ config LOONGARCH
 	select HAVE_DYNAMIC_FTRACE_WITH_ARGS
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EBPF_JIT
+	select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN
 	select HAVE_EXIT_THREAD
 	select HAVE_FAST_GUP
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -441,6 +442,24 @@ config ARCH_IOREMAP
 	  protection support. However, you can enable LoongArch DMW-based
 	  ioremap() for better performance.
 
+config ARCH_STRICT_ALIGN
+	bool "Enable -mstrict-align to prevent unaligned accesses" if EXPERT
+	default y
+	help
+	  Not all LoongArch cores support h/w unaligned access, we can use
+	  -mstrict-align build parameter to prevent unaligned accesses.
+
+	  CPUs with h/w unaligned access support:
+	  Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
+
+	  CPUs without h/w unaligned access support:
+	  Loongson-2K500/2K1000.
+
+	  This option is enabled by default to make the kernel be able to run
+	  on all LoongArch systems. But you can disable it manually if you want
+	  to run kernel only on systems with h/w unaligned access support in
+	  order to optimise for performance.
+
 config KEXEC
 	bool "Kexec system call"
 	select KEXEC_CORE
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 4402387d27551..6e1c931a8507e 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -91,10 +91,15 @@ KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
 # instead of .eh_frame so we don't discard them.
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
+ifdef CONFIG_ARCH_STRICT_ALIGN
 # Don't emit unaligned accesses.
 # Not all LoongArch cores support unaligned access, and as kernel we can't
 # rely on others to provide emulation for these accesses.
 KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+else
+# Optimise for performance on hardware supports unaligned access.
+KBUILD_CFLAGS += $(call cc-option,-mno-strict-align)
+endif
 
 KBUILD_CFLAGS += -isystem $(shell $(CC) -print-file-name=include)
 
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index c8cfbd562921d..df5dbabfe7a6c 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -8,13 +8,15 @@ extra-y		:= vmlinux.lds
 obj-y		+= head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
 		   traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
 		   elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
-		   alternative.o unaligned.o unwind.o
+		   alternative.o unwind.o
 
 obj-$(CONFIG_ACPI)		+= acpi.o
 obj-$(CONFIG_EFI)		+= efi.o
 
 obj-$(CONFIG_CPU_HAS_FPU)	+= fpu.o
 
+obj-$(CONFIG_ARCH_STRICT_ALIGN)	+= unaligned.o
+
 ifdef CONFIG_FUNCTION_TRACER
 ifndef CONFIG_DYNAMIC_FTRACE
 obj-y += mcount.o ftrace.o
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index c38a146a973b4..05511203732c3 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -371,9 +371,14 @@ int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */
 
 asmlinkage void noinstr do_ale(struct pt_regs *regs)
 {
-	unsigned int *pc;
 	irqentry_state_t state = irqentry_enter(regs);
 
+#ifndef CONFIG_ARCH_STRICT_ALIGN
+	die_if_kernel("Kernel ale access", regs);
+	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+#else
+	unsigned int *pc;
+
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
 
 	/*
@@ -397,8 +402,8 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
 sigbus:
 	die_if_kernel("Kernel ale access", regs);
 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
-
 out:
+#endif
 	irqentry_exit(regs, state);
 }
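
As an aside (not part of the patch above), the kind of access that the
-mstrict-align / -mno-strict-align choice affects can be shown with a small
userspace C sketch; the struct layout and names below are invented for
illustration. When the compiler knows an access may be misaligned (a packed
struct member here), -mstrict-align forces byte-wise loads/stores that work
on every core, while -mno-strict-align may emit a single unaligned load,
which is faster but raises an ALE exception on cores without h/w unaligned
access support. That is exactly the case the kernel either avoids entirely
(ARCH_STRICT_ALIGN=y) or handles through the emulation code in unaligned.o.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Illustrative only: 'val' sits at offset 1, so the compiler knows
	 * the access in main() may be misaligned.  With -mstrict-align it
	 * is expanded into byte accesses; with -mno-strict-align it may
	 * become one 32-bit load, which traps on cores without h/w
	 * unaligned access support.
	 */
	struct __attribute__((packed)) wire_hdr {
		uint8_t  tag;
		uint32_t val;
	};

	int main(void)
	{
		struct wire_hdr hdr = { .tag = 1, .val = 0x12345678 };

		/* codegen of this read is what -m[no-]strict-align decides */
		printf("val = 0x%08x\n", (unsigned int)hdr.val);
		return 0;
	}

Comparing the generated assembly of this sketch with a LoongArch cross
compiler (for example, gcc -O2 -S built once with -mstrict-align and once
with -mno-strict-align, assuming such a toolchain is available) makes the
trade-off exposed by the new Kconfig option visible: more, always-safe
instructions on one side; fewer instructions that rely on h/w unaligned
access support on the other.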