crypto: x86/sha - load modules based on CPU features
authorRoxana Nicolescu <roxana.nicolescu@canonical.com>
Fri, 15 Sep 2023 10:23:25 +0000 (12:23 +0200)
committerHerbert Xu <herbert@gondor.apana.org.au>
Wed, 20 Sep 2023 05:15:54 +0000 (13:15 +0800)
The x86 optimized crypto modules are built as modules rather than built-in,
and they are not loaded when the crypto API is initialized, resulting in the
generic built-in module (sha1-generic) being used instead.

This was discovered when creating a sha1/sha256 checksum of a 2 GB file
with kcapi-tools: it took significantly longer than creating a sha512
checksum of the same file. trace-cmd showed that the generic module was
used for sha1/sha256, whereas the optimized module was used for sha512.

Add module aliases for these x86 optimized crypto modules based on CPU
feature bits so udev gets a chance to load them later in the boot process.
This resulted in a ~3x decrease in the real-time execution of kcapi-dgst.
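
In outline, the mechanism is a CPU-feature match table exported with
MODULE_DEVICE_TABLE(x86cpu, ...), which generates module aliases that udev
matches against the CPU's modalias, plus an x86_match_cpu() gate in the
module init function. A minimal sketch of the pattern, mirroring the
sha1/sha256 hunks below (the init function name here is only a placeholder):

  #include <linux/module.h>
  #include <asm/cpu_device_id.h>

  /* CPU features this module is useful for; udev loads the module when
   * the CPU's modalias matches an alias generated from this table. */
  static const struct x86_cpu_id module_cpu_ids[] = {
          X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
          {}
  };
  MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);

  static int __init example_mod_init(void)
  {
          /* Refuse to load on CPUs without any of the listed features. */
          if (!x86_match_cpu(module_cpu_ids))
                  return -ENODEV;

          /* ... register the optimized shash implementations ... */
          return 0;
  }
  module_init(example_mod_init);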

The fix is inspired by commit
aa031b8f702e ("crypto: x86/sha512 - load based on CPU features"),
where a similar change was made for sha512.

Cc: stable@vger.kernel.org # 5.15+
Suggested-by: Dimitri John Ledkov <dimitri.ledkov@canonical.com>
Suggested-by: Julian Andres Klode <julian.klode@canonical.com>
Signed-off-by: Roxana Nicolescu <roxana.nicolescu@canonical.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/x86/crypto/sha1_ssse3_glue.c
arch/x86/crypto/sha256_ssse3_glue.c

diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 44340a1139e0b7cd57be7ee46491199be33ecd31..959afa705e95ca16699df719964bf222a10784e8 100644
 #include <linux/types.h>
 #include <crypto/sha1.h>
 #include <crypto/sha1_base.h>
+#include <asm/cpu_device_id.h>
 #include <asm/simd.h>
 
+static const struct x86_cpu_id module_cpu_ids[] = {
+       X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
+       X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
+       X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
+
 static int sha1_update(struct shash_desc *desc, const u8 *data,
                             unsigned int len, sha1_block_fn *sha1_xform)
 {
@@ -301,6 +310,9 @@ static inline void unregister_sha1_ni(void) { }
 
 static int __init sha1_ssse3_mod_init(void)
 {
+       if (!x86_match_cpu(module_cpu_ids))
+               return -ENODEV;
+
        if (register_sha1_ssse3())
                goto fail;
 
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 3a5f6be7dbba4e5af1cc7e0103ff6cc9ebbf73b2..d25235f0ccafc3dbb66b79e6453284e4147fc991 100644
 #include <crypto/sha2.h>
 #include <crypto/sha256_base.h>
 #include <linux/string.h>
+#include <asm/cpu_device_id.h>
 #include <asm/simd.h>
 
 asmlinkage void sha256_transform_ssse3(struct sha256_state *state,
                                       const u8 *data, int blocks);
 
+static const struct x86_cpu_id module_cpu_ids[] = {
+       X86_MATCH_FEATURE(X86_FEATURE_AVX2, NULL),
+       X86_MATCH_FEATURE(X86_FEATURE_AVX, NULL),
+       X86_MATCH_FEATURE(X86_FEATURE_SSSE3, NULL),
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, module_cpu_ids);
+
 static int _sha256_update(struct shash_desc *desc, const u8 *data,
                          unsigned int len, sha256_block_fn *sha256_xform)
 {
@@ -366,6 +375,9 @@ static inline void unregister_sha256_ni(void) { }
 
 static int __init sha256_ssse3_mod_init(void)
 {
+       if (!x86_match_cpu(module_cpu_ids))
+               return -ENODEV;
+
        if (register_sha256_ssse3())
                goto fail;