riscv: lib: Introduce has_fast_unaligned_accesses()
author Charlie Jenkins <charlie@rivosinc.com>
Fri, 8 Mar 2024 18:25:55 +0000 (10:25 -0800)
committer Palmer Dabbelt <palmer@rivosinc.com>
Wed, 13 Mar 2024 14:30:28 +0000 (07:30 -0700)
Introduce has_fast_unaligned_accesses() so that callers no longer need
to explicitly check the fast_misaligned_access_speed_key static key;
the key itself is renamed to fast_unaligned_access_speed_key to match.

Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
Reviewed-by: Evan Green <evan@rivosinc.com>
Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
Tested-by: Samuel Holland <samuel.holland@sifive.com>
Link: https://lore.kernel.org/r/20240308-disable_misaligned_probe_config-v9-1-a388770ba0ce@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
arch/riscv/include/asm/cpufeature.h
arch/riscv/kernel/cpufeature.c
arch/riscv/lib/csum.c
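
The new helper lets callers state the intent directly instead of open-coding a
static-branch check, as the csum.c hunk below does. A minimal sketch of a
hypothetical consumer (copy_bytes() and everything around it is invented here
for illustration and is not part of this patch):

#include <linux/types.h>
#include <asm/cpufeature.h>	/* has_fast_unaligned_accesses() */
#include <asm/unaligned.h>

/* Hypothetical consumer: copy len bytes, taking a word-at-a-time path
 * only when the hardware handles misaligned accesses quickly. */
static void copy_bytes(u8 *dst, const u8 *src, unsigned long len)
{
	if (has_fast_unaligned_accesses()) {
		/* Misaligned word loads/stores are cheap here, so no
		 * alignment preamble is needed before the word loop. */
		for (; len >= sizeof(long); len -= sizeof(long),
		     dst += sizeof(long), src += sizeof(long))
			put_unaligned(get_unaligned((const unsigned long *)src),
				      (unsigned long *)dst);
	}

	/* Byte tail, or the whole copy on slow-unaligned systems. */
	while (len--)
		*dst++ = *src++;
}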

arch/riscv/include/asm/cpufeature.h
index 5a626ed2c47a8915b3848df2e7f4a7ea0601bd71..466e1f591919bf9a4e320ee08ccadb31201897a4 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright 2022-2023 Rivos, Inc
+ * Copyright 2022-2024 Rivos, Inc
  */
 
 #ifndef _ASM_CPUFEATURE_H
@@ -53,6 +53,13 @@ static inline bool check_unaligned_access_emulated(int cpu)
 static inline void unaligned_emulation_finish(void) {}
 #endif
 
+DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);
+
+static __always_inline bool has_fast_unaligned_accesses(void)
+{
+       return static_branch_likely(&fast_unaligned_access_speed_key);
+}
+
 unsigned long riscv_get_elf_hwcap(void);
 
 struct riscv_isa_ext_data {
@@ -135,6 +142,4 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
        return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
 }
 
-DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
-
 #endif
arch/riscv/kernel/cpufeature.c
index 89920f84d0a34385471e9afbf9c26d287cbbd838..7878cddccc0de1c052c82f5e60f865481f39a511 100644 (file)
@@ -810,14 +810,14 @@ static void check_unaligned_access_nonboot_cpu(void *param)
                check_unaligned_access(pages[cpu]);
 }
 
-DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
+DEFINE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);
 
 static void modify_unaligned_access_branches(cpumask_t *mask, int weight)
 {
        if (cpumask_weight(mask) == weight)
-               static_branch_enable_cpuslocked(&fast_misaligned_access_speed_key);
+               static_branch_enable_cpuslocked(&fast_unaligned_access_speed_key);
        else
-               static_branch_disable_cpuslocked(&fast_misaligned_access_speed_key);
+               static_branch_disable_cpuslocked(&fast_unaligned_access_speed_key);
 }
 
 static void set_unaligned_access_static_branches_except_cpu(int cpu)
arch/riscv/lib/csum.c
index af3df5274ccbae0118488080040f45881a3e025a..7178e0acfa22841da893fad4ae4079ccf350e44d 100644 (file)
@@ -3,7 +3,7 @@
  * Checksum library
  *
  * Influenced by arch/arm64/lib/csum.c
- * Copyright (C) 2023 Rivos Inc.
+ * Copyright (C) 2023-2024 Rivos Inc.
  */
 #include <linux/bitops.h>
 #include <linux/compiler.h>
@@ -318,10 +318,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
         * branches. The largest chunk of overlap was delegated into the
         * do_csum_common function.
         */
-       if (static_branch_likely(&fast_misaligned_access_speed_key))
-               return do_csum_no_alignment(buff, len);
-
-       if (((unsigned long)buff & OFFSET_MASK) == 0)
+       if (has_fast_unaligned_accesses() || (((unsigned long)buff & OFFSET_MASK) == 0))
                return do_csum_no_alignment(buff, len);
 
        return do_csum_with_alignment(buff, len);
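
has_fast_unaligned_accesses() is __always_inline on top of
static_branch_likely(), so each call site compiles to a jump-label branch that
is patched at runtime rather than a load-and-test of a flag; the _cpuslocked
enable/disable variants above indicate the caller already holds the CPU
hotplug read lock. A minimal sketch of the underlying static-key pattern, with
example_fast_key and the functions around it invented for illustration:

#include <linux/jump_label.h>

/* False by default: every branch site takes the slow path until the
 * key is enabled and the kernel patches the branch in place. */
DEFINE_STATIC_KEY_FALSE(example_fast_key);

static __always_inline bool example_is_fast(void)
{
	return static_branch_likely(&example_fast_key);
}

/* Called once capability probing completes: flipping the key rewrites
 * every inlined example_is_fast() branch site in one go. */
static void example_probe_done(bool all_cpus_fast)
{
	if (all_cpus_fast)
		static_branch_enable(&example_fast_key);
	else
		static_branch_disable(&example_fast_key);
}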