Revert "crypto: remove CONFIG_CRYPTO_STATS"
author	Herbert Xu <herbert@gondor.apana.org.au>
Wed, 13 Mar 2024 01:49:37 +0000 (09:49 +0800)
committer	Herbert Xu <herbert@gondor.apana.org.au>
Wed, 13 Mar 2024 01:49:37 +0000 (09:49 +0800)
This reverts commit 2beb81fbf0c01a62515a1bcef326168494ee2bd0.

While removing CONFIG_CRYPTO_STATS is a worthy goal, this also
removed unrelated infrastructure such as crypto_comp_alg_common.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
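
For reference, the comp_alg_common infrastructure that this revert brings back
looks roughly like the sketch below. This is a simplified reconstruction based
only on the hunks in this patch; the pre-removal include/crypto/acompress.h is
authoritative, and the exact guarding of the stat member and the helper macros
there may differ.

    /*
     * Sketch only: per-algorithm compression statistics and the common
     * base shared by acomp and scomp algorithms.
     */
    struct crypto_istat_compress {
            atomic64_t compress_cnt;    /* number of compress requests */
            atomic64_t compress_tlen;   /* total bytes fed to compress */
            atomic64_t decompress_cnt;  /* number of decompress requests */
            atomic64_t decompress_tlen; /* total bytes fed to decompress */
            atomic64_t err_cnt;         /* requests that returned an error */
    };

    struct comp_alg_common {
    #ifdef CONFIG_CRYPTO_STATS
            struct crypto_istat_compress stat;
    #endif
            struct crypto_alg base;
    };

    /*
     * struct acomp_alg embeds this as ->calg, which is why the acompress.c
     * hunks below switch container_of() from "base" to "calg.base" and why
     * comp_prepare_alg() and comp_get_stat() operate on comp_alg_common.
     */
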
33 files changed:
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
crypto/Kconfig
crypto/Makefile
crypto/acompress.c
crypto/aead.c
crypto/ahash.c
crypto/akcipher.c
crypto/compress.h
crypto/crypto_user.c [deleted file]
crypto/crypto_user_base.c [new file with mode: 0644]
crypto/crypto_user_stat.c [new file with mode: 0644]
crypto/hash.h
crypto/kpp.c
crypto/lskcipher.c
crypto/rng.c
crypto/scompress.c
crypto/shash.c
crypto/sig.c
crypto/skcipher.c
crypto/skcipher.h
include/crypto/acompress.h
include/crypto/aead.h
include/crypto/akcipher.h
include/crypto/algapi.h
include/crypto/hash.h
include/crypto/internal/acompress.h
include/crypto/internal/cryptouser.h [new file with mode: 0644]
include/crypto/internal/scompress.h
include/crypto/kpp.h
include/crypto/rng.h
include/crypto/skcipher.h
include/uapi/linux/cryptouser.h

diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 063f0c11087dda6ecf6c3333fa73bb5e38cabb9b..cae2dd34fbb49d16ee020e72fb669010dca832f8 100644
@@ -766,6 +766,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_STATS=y
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index ab608ce768b7bde4e53c1be521e8327fec2b3eb3..42b988873e5443df15b054d78610697fdf769293 100644
@@ -752,6 +752,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_STATS=y
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f937142aa94d62370736dcfe72a3988c54754b1c..44661c2e30ca5de9387d4c09202a588b6f57e64f 100644
@@ -1456,6 +1456,26 @@ config CRYPTO_USER_API_ENABLE_OBSOLETE
          already been phased out from internal use by the kernel, and are
          only useful for userspace clients that still rely on them.
 
+config CRYPTO_STATS
+       bool "Crypto usage statistics"
+       depends on CRYPTO_USER
+       help
+         Enable the gathering of crypto stats.
+
+         Enabling this option reduces the performance of the crypto API.  It
+         should only be enabled when there is actually a use case for it.
+
+         This collects data sizes, numbers of requests, and numbers
+         of errors processed by:
+         - AEAD ciphers (encrypt, decrypt)
+         - asymmetric key ciphers (encrypt, decrypt, verify, sign)
+         - symmetric key ciphers (encrypt, decrypt)
+         - compression algorithms (compress, decompress)
+         - hash algorithms (hash)
+         - key-agreement protocol primitives (setsecret, generate
+           public key, compute shared secret)
+         - RNG (generate, seed)
+
 endmenu
 
 config CRYPTO_HASH_INFO
diff --git a/crypto/Makefile b/crypto/Makefile
index de9a3312a2c848517f317ad114ea11541306610f..408f0a1f9ab91b84195a2b629d6704bc8c18db39 100644
@@ -69,6 +69,8 @@ cryptomgr-y := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
 obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
+crypto_user-y := crypto_user_base.o
+crypto_user-$(CONFIG_CRYPTO_STATS) += crypto_user_stat.o
 obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
 obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 484a865b23cd8ca557106277e799ffb9c200aed2..1c682810a484dcdf3368ef61197e6262926e75dd 100644
@@ -25,7 +25,7 @@ static const struct crypto_type crypto_acomp_type;
 
 static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
 {
-       return container_of(alg, struct acomp_alg, base);
+       return container_of(alg, struct acomp_alg, calg.base);
 }
 
 static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
@@ -93,6 +93,32 @@ static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
        return extsize;
 }
 
+static inline int __crypto_acomp_report_stat(struct sk_buff *skb,
+                                            struct crypto_alg *alg)
+{
+       struct comp_alg_common *calg = __crypto_comp_alg_common(alg);
+       struct crypto_istat_compress *istat = comp_get_stat(calg);
+       struct crypto_stat_compress racomp;
+
+       memset(&racomp, 0, sizeof(racomp));
+
+       strscpy(racomp.type, "acomp", sizeof(racomp.type));
+       racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt);
+       racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen);
+       racomp.stat_decompress_cnt =  atomic64_read(&istat->decompress_cnt);
+       racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen);
+       racomp.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
+}
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
+{
+       return __crypto_acomp_report_stat(skb, alg);
+}
+#endif
+
 static const struct crypto_type crypto_acomp_type = {
        .extsize = crypto_acomp_extsize,
        .init_tfm = crypto_acomp_init_tfm,
@@ -101,6 +127,9 @@ static const struct crypto_type crypto_acomp_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_acomp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_acomp_report_stat,
 #endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
@@ -153,12 +182,24 @@ void acomp_request_free(struct acomp_req *req)
 }
 EXPORT_SYMBOL_GPL(acomp_request_free);
 
-int crypto_register_acomp(struct acomp_alg *alg)
+void comp_prepare_alg(struct comp_alg_common *alg)
 {
+       struct crypto_istat_compress *istat = comp_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
-       base->cra_type = &crypto_acomp_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
+}
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+       struct crypto_alg *base = &alg->calg.base;
+
+       comp_prepare_alg(&alg->calg);
+
+       base->cra_type = &crypto_acomp_type;
        base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
 
        return crypto_register_alg(base);
diff --git a/crypto/aead.c b/crypto/aead.c
index 0e75a69189df4c662695bdefa6b56bddeafa1bf4..54906633566a2357789d619916ec3a099c935064 100644
 
 #include "internal.h"
 
+static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
                            unsigned int keylen)
 {
@@ -81,28 +90,62 @@ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 }
 EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
 
+static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&istat->err_cnt);
+
+       return err;
+}
+
 int crypto_aead_encrypt(struct aead_request *req)
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
+       struct aead_alg *alg = crypto_aead_alg(aead);
+       struct crypto_istat_aead *istat;
+       int ret;
+
+       istat = aead_get_stat(alg);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               atomic64_inc(&istat->encrypt_cnt);
+               atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+       }
 
        if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-               return -ENOKEY;
+               ret = -ENOKEY;
+       else
+               ret = alg->encrypt(req);
 
-       return crypto_aead_alg(aead)->encrypt(req);
+       return crypto_aead_errstat(istat, ret);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
 
 int crypto_aead_decrypt(struct aead_request *req)
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
+       struct aead_alg *alg = crypto_aead_alg(aead);
+       struct crypto_istat_aead *istat;
+       int ret;
 
-       if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
-               return -ENOKEY;
+       istat = aead_get_stat(alg);
 
-       if (req->cryptlen < crypto_aead_authsize(aead))
-               return -EINVAL;
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               atomic64_inc(&istat->encrypt_cnt);
+               atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+       }
+
+       if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+               ret = -ENOKEY;
+       else if (req->cryptlen < crypto_aead_authsize(aead))
+               ret = -EINVAL;
+       else
+               ret = alg->decrypt(req);
 
-       return crypto_aead_alg(aead)->decrypt(req);
+       return crypto_aead_errstat(istat, ret);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
 
@@ -172,6 +215,26 @@ static void crypto_aead_free_instance(struct crypto_instance *inst)
        aead->free(aead);
 }
 
+static int __maybe_unused crypto_aead_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct aead_alg *aead = container_of(alg, struct aead_alg, base);
+       struct crypto_istat_aead *istat = aead_get_stat(aead);
+       struct crypto_stat_aead raead;
+
+       memset(&raead, 0, sizeof(raead));
+
+       strscpy(raead.type, "aead", sizeof(raead.type));
+
+       raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+       raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+       raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+       raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+       raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
+}
+
 static const struct crypto_type crypto_aead_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_aead_init_tfm,
@@ -181,6 +244,9 @@ static const struct crypto_type crypto_aead_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_aead_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_aead_report_stat,
 #endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
@@ -211,6 +277,7 @@ EXPORT_SYMBOL_GPL(crypto_has_aead);
 
 static int aead_prepare_alg(struct aead_alg *alg)
 {
+       struct crypto_istat_aead *istat = aead_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
        if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
@@ -224,6 +291,9 @@ static int aead_prepare_alg(struct aead_alg *alg)
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
+
        return 0;
 }
 
diff --git a/crypto/ahash.c b/crypto/ahash.c
index bcd9de009a91b6f3a75964c87409fdb4284cc308..0ac83f7f701df283260383672e730f879375ed69 100644
 
 #define CRYPTO_ALG_TYPE_AHASH_MASK     0x0000000e
 
+static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg)
+{
+       return hash_get_stat(&alg->halg);
+}
+
+static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&ahash_get_stat(alg)->err_cnt);
+
+       return err;
+}
+
 /*
  * For an ahash tfm that is using an shash algorithm (instead of an ahash
  * algorithm), this returns the underlying shash tfm.
@@ -328,47 +344,75 @@ static void ahash_restore_req(struct ahash_request *req, int err)
 int crypto_ahash_update(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct ahash_alg *alg;
 
        if (likely(tfm->using_shash))
                return shash_ahash_update(req, ahash_request_ctx(req));
 
-       return crypto_ahash_alg(tfm)->update(req);
+       alg = crypto_ahash_alg(tfm);
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen);
+       return crypto_ahash_errstat(alg, alg->update(req));
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_update);
 
 int crypto_ahash_final(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct ahash_alg *alg;
 
        if (likely(tfm->using_shash))
                return crypto_shash_final(ahash_request_ctx(req), req->result);
 
-       return crypto_ahash_alg(tfm)->final(req);
+       alg = crypto_ahash_alg(tfm);
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&ahash_get_stat(alg)->hash_cnt);
+       return crypto_ahash_errstat(alg, alg->final(req));
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
 int crypto_ahash_finup(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct ahash_alg *alg;
 
        if (likely(tfm->using_shash))
                return shash_ahash_finup(req, ahash_request_ctx(req));
 
-       return crypto_ahash_alg(tfm)->finup(req);
+       alg = crypto_ahash_alg(tfm);
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_hash *istat = ahash_get_stat(alg);
+
+               atomic64_inc(&istat->hash_cnt);
+               atomic64_add(req->nbytes, &istat->hash_tlen);
+       }
+       return crypto_ahash_errstat(alg, alg->finup(req));
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
 
 int crypto_ahash_digest(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct ahash_alg *alg;
+       int err;
 
        if (likely(tfm->using_shash))
                return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
 
+       alg = crypto_ahash_alg(tfm);
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_hash *istat = ahash_get_stat(alg);
+
+               atomic64_inc(&istat->hash_cnt);
+               atomic64_add(req->nbytes, &istat->hash_tlen);
+       }
+
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-               return -ENOKEY;
+               err = -ENOKEY;
+       else
+               err = alg->digest(req);
 
-       return crypto_ahash_alg(tfm)->digest(req);
+       return crypto_ahash_errstat(alg, err);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
@@ -527,6 +571,12 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
                   __crypto_hash_alg_common(alg)->digestsize);
 }
 
+static int __maybe_unused crypto_ahash_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       return crypto_hash_report_stat(skb, alg, "ahash");
+}
+
 static const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
@@ -536,6 +586,9 @@ static const struct crypto_type crypto_ahash_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_ahash_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_ahash_report_stat,
 #endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index e0ff5f4dda6d62c40d6478b5c727713d1d8ebc6a..52813f0b19e4e915bf7c53e178e97833caee72f0 100644
@@ -70,6 +70,30 @@ static void crypto_akcipher_free_instance(struct crypto_instance *inst)
        akcipher->free(akcipher);
 }
 
+static int __maybe_unused crypto_akcipher_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg);
+       struct crypto_istat_akcipher *istat;
+       struct crypto_stat_akcipher rakcipher;
+
+       istat = akcipher_get_stat(akcipher);
+
+       memset(&rakcipher, 0, sizeof(rakcipher));
+
+       strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+       rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+       rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+       rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+       rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+       rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt);
+       rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt);
+       rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
+                      sizeof(rakcipher), &rakcipher);
+}
+
 static const struct crypto_type crypto_akcipher_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_akcipher_init_tfm,
@@ -79,6 +103,9 @@ static const struct crypto_type crypto_akcipher_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_akcipher_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_akcipher_report_stat,
 #endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
@@ -104,11 +131,15 @@ EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
 
 static void akcipher_prepare_alg(struct akcipher_alg *alg)
 {
+       struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
        base->cra_type = &crypto_akcipher_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
 }
 
 static int akcipher_default_op(struct akcipher_request *req)
diff --git a/crypto/compress.h b/crypto/compress.h
index 23ea43810810c339a2b05a9b72fc89a361805a51..19f65516d699c0f6d5bb805cc3f64bcb51adbdcc 100644
 #include "internal.h"
 
 struct acomp_req;
+struct comp_alg_common;
 struct sk_buff;
 
 int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
 struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
 void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
 
+int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg);
+
+void comp_prepare_alg(struct comp_alg_common *alg);
+
 #endif /* _LOCAL_CRYPTO_COMPRESS_H */
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
deleted file mode 100644
index 6c57183..0000000
+++ /dev/null
@@ -1,522 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Crypto user configuration API.
- *
- * Copyright (C) 2011 secunet Security Networks AG
- * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
- */
-
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/cryptouser.h>
-#include <linux/sched.h>
-#include <linux/security.h>
-#include <net/netlink.h>
-#include <net/net_namespace.h>
-#include <net/sock.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/internal/rng.h>
-#include <crypto/akcipher.h>
-#include <crypto/kpp.h>
-
-#include "internal.h"
-
-#define null_terminated(x)     (strnlen(x, sizeof(x)) < sizeof(x))
-
-static DEFINE_MUTEX(crypto_cfg_mutex);
-
-struct crypto_dump_info {
-       struct sk_buff *in_skb;
-       struct sk_buff *out_skb;
-       u32 nlmsg_seq;
-       u16 nlmsg_flags;
-};
-
-static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
-{
-       struct crypto_alg *q, *alg = NULL;
-
-       down_read(&crypto_alg_sem);
-
-       list_for_each_entry(q, &crypto_alg_list, cra_list) {
-               int match = 0;
-
-               if (crypto_is_larval(q))
-                       continue;
-
-               if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
-                       continue;
-
-               if (strlen(p->cru_driver_name))
-                       match = !strcmp(q->cra_driver_name,
-                                       p->cru_driver_name);
-               else if (!exact)
-                       match = !strcmp(q->cra_name, p->cru_name);
-
-               if (!match)
-                       continue;
-
-               if (unlikely(!crypto_mod_get(q)))
-                       continue;
-
-               alg = q;
-               break;
-       }
-
-       up_read(&crypto_alg_sem);
-
-       return alg;
-}
-
-static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_report_cipher rcipher;
-
-       memset(&rcipher, 0, sizeof(rcipher));
-
-       strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
-
-       rcipher.blocksize = alg->cra_blocksize;
-       rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
-       rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
-
-       return nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
-                      sizeof(rcipher), &rcipher);
-}
-
-static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_report_comp rcomp;
-
-       memset(&rcomp, 0, sizeof(rcomp));
-
-       strscpy(rcomp.type, "compression", sizeof(rcomp.type));
-
-       return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
-}
-
-static int crypto_report_one(struct crypto_alg *alg,
-                            struct crypto_user_alg *ualg, struct sk_buff *skb)
-{
-       memset(ualg, 0, sizeof(*ualg));
-
-       strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
-       strscpy(ualg->cru_driver_name, alg->cra_driver_name,
-               sizeof(ualg->cru_driver_name));
-       strscpy(ualg->cru_module_name, module_name(alg->cra_module),
-               sizeof(ualg->cru_module_name));
-
-       ualg->cru_type = 0;
-       ualg->cru_mask = 0;
-       ualg->cru_flags = alg->cra_flags;
-       ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
-
-       if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
-               goto nla_put_failure;
-       if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
-               struct crypto_report_larval rl;
-
-               memset(&rl, 0, sizeof(rl));
-               strscpy(rl.type, "larval", sizeof(rl.type));
-               if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(rl), &rl))
-                       goto nla_put_failure;
-               goto out;
-       }
-
-       if (alg->cra_type && alg->cra_type->report) {
-               if (alg->cra_type->report(skb, alg))
-                       goto nla_put_failure;
-
-               goto out;
-       }
-
-       switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
-       case CRYPTO_ALG_TYPE_CIPHER:
-               if (crypto_report_cipher(skb, alg))
-                       goto nla_put_failure;
-
-               break;
-       case CRYPTO_ALG_TYPE_COMPRESS:
-               if (crypto_report_comp(skb, alg))
-                       goto nla_put_failure;
-
-               break;
-       }
-
-out:
-       return 0;
-
-nla_put_failure:
-       return -EMSGSIZE;
-}
-
-static int crypto_report_alg(struct crypto_alg *alg,
-                            struct crypto_dump_info *info)
-{
-       struct sk_buff *in_skb = info->in_skb;
-       struct sk_buff *skb = info->out_skb;
-       struct nlmsghdr *nlh;
-       struct crypto_user_alg *ualg;
-       int err = 0;
-
-       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
-                       CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
-       if (!nlh) {
-               err = -EMSGSIZE;
-               goto out;
-       }
-
-       ualg = nlmsg_data(nlh);
-
-       err = crypto_report_one(alg, ualg, skb);
-       if (err) {
-               nlmsg_cancel(skb, nlh);
-               goto out;
-       }
-
-       nlmsg_end(skb, nlh);
-
-out:
-       return err;
-}
-
-static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
-                        struct nlattr **attrs)
-{
-       struct net *net = sock_net(in_skb->sk);
-       struct crypto_user_alg *p = nlmsg_data(in_nlh);
-       struct crypto_alg *alg;
-       struct sk_buff *skb;
-       struct crypto_dump_info info;
-       int err;
-
-       if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
-               return -EINVAL;
-
-       alg = crypto_alg_match(p, 0);
-       if (!alg)
-               return -ENOENT;
-
-       err = -ENOMEM;
-       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (!skb)
-               goto drop_alg;
-
-       info.in_skb = in_skb;
-       info.out_skb = skb;
-       info.nlmsg_seq = in_nlh->nlmsg_seq;
-       info.nlmsg_flags = 0;
-
-       err = crypto_report_alg(alg, &info);
-
-drop_alg:
-       crypto_mod_put(alg);
-
-       if (err) {
-               kfree_skb(skb);
-               return err;
-       }
-
-       return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
-}
-
-static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
-{
-       const size_t start_pos = cb->args[0];
-       size_t pos = 0;
-       struct crypto_dump_info info;
-       struct crypto_alg *alg;
-       int res;
-
-       info.in_skb = cb->skb;
-       info.out_skb = skb;
-       info.nlmsg_seq = cb->nlh->nlmsg_seq;
-       info.nlmsg_flags = NLM_F_MULTI;
-
-       down_read(&crypto_alg_sem);
-       list_for_each_entry(alg, &crypto_alg_list, cra_list) {
-               if (pos >= start_pos) {
-                       res = crypto_report_alg(alg, &info);
-                       if (res == -EMSGSIZE)
-                               break;
-                       if (res)
-                               goto out;
-               }
-               pos++;
-       }
-       cb->args[0] = pos;
-       res = skb->len;
-out:
-       up_read(&crypto_alg_sem);
-       return res;
-}
-
-static int crypto_dump_report_done(struct netlink_callback *cb)
-{
-       return 0;
-}
-
-static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
-                            struct nlattr **attrs)
-{
-       struct crypto_alg *alg;
-       struct crypto_user_alg *p = nlmsg_data(nlh);
-       struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
-       LIST_HEAD(list);
-
-       if (!netlink_capable(skb, CAP_NET_ADMIN))
-               return -EPERM;
-
-       if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
-               return -EINVAL;
-
-       if (priority && !strlen(p->cru_driver_name))
-               return -EINVAL;
-
-       alg = crypto_alg_match(p, 1);
-       if (!alg)
-               return -ENOENT;
-
-       down_write(&crypto_alg_sem);
-
-       crypto_remove_spawns(alg, &list, NULL);
-
-       if (priority)
-               alg->cra_priority = nla_get_u32(priority);
-
-       up_write(&crypto_alg_sem);
-
-       crypto_mod_put(alg);
-       crypto_remove_final(&list);
-
-       return 0;
-}
-
-static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
-                         struct nlattr **attrs)
-{
-       struct crypto_alg *alg;
-       struct crypto_user_alg *p = nlmsg_data(nlh);
-       int err;
-
-       if (!netlink_capable(skb, CAP_NET_ADMIN))
-               return -EPERM;
-
-       if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
-               return -EINVAL;
-
-       alg = crypto_alg_match(p, 1);
-       if (!alg)
-               return -ENOENT;
-
-       /* We can not unregister core algorithms such as aes-generic.
-        * We would loose the reference in the crypto_alg_list to this algorithm
-        * if we try to unregister. Unregistering such an algorithm without
-        * removing the module is not possible, so we restrict to crypto
-        * instances that are build from templates. */
-       err = -EINVAL;
-       if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
-               goto drop_alg;
-
-       err = -EBUSY;
-       if (refcount_read(&alg->cra_refcnt) > 2)
-               goto drop_alg;
-
-       crypto_unregister_instance((struct crypto_instance *)alg);
-       err = 0;
-
-drop_alg:
-       crypto_mod_put(alg);
-       return err;
-}
-
-static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
-                         struct nlattr **attrs)
-{
-       int exact = 0;
-       const char *name;
-       struct crypto_alg *alg;
-       struct crypto_user_alg *p = nlmsg_data(nlh);
-       struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
-
-       if (!netlink_capable(skb, CAP_NET_ADMIN))
-               return -EPERM;
-
-       if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
-               return -EINVAL;
-
-       if (strlen(p->cru_driver_name))
-               exact = 1;
-
-       if (priority && !exact)
-               return -EINVAL;
-
-       alg = crypto_alg_match(p, exact);
-       if (alg) {
-               crypto_mod_put(alg);
-               return -EEXIST;
-       }
-
-       if (strlen(p->cru_driver_name))
-               name = p->cru_driver_name;
-       else
-               name = p->cru_name;
-
-       alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
-       if (IS_ERR(alg))
-               return PTR_ERR(alg);
-
-       down_write(&crypto_alg_sem);
-
-       if (priority)
-               alg->cra_priority = nla_get_u32(priority);
-
-       up_write(&crypto_alg_sem);
-
-       crypto_mod_put(alg);
-
-       return 0;
-}
-
-static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
-                         struct nlattr **attrs)
-{
-       if (!netlink_capable(skb, CAP_NET_ADMIN))
-               return -EPERM;
-       return crypto_del_default_rng();
-}
-
-static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
-                            struct nlattr **attrs)
-{
-       /* No longer supported */
-       return -ENOTSUPP;
-}
-
-#define MSGSIZE(type) sizeof(struct type)
-
-static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
-       [CRYPTO_MSG_NEWALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
-       [CRYPTO_MSG_DELALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
-       [CRYPTO_MSG_UPDATEALG   - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
-       [CRYPTO_MSG_GETALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
-       [CRYPTO_MSG_DELRNG      - CRYPTO_MSG_BASE] = 0,
-       [CRYPTO_MSG_GETSTAT     - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
-};
-
-static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
-       [CRYPTOCFGA_PRIORITY_VAL]   = { .type = NLA_U32},
-};
-
-#undef MSGSIZE
-
-static const struct crypto_link {
-       int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
-       int (*dump)(struct sk_buff *, struct netlink_callback *);
-       int (*done)(struct netlink_callback *);
-} crypto_dispatch[CRYPTO_NR_MSGTYPES] = {
-       [CRYPTO_MSG_NEWALG      - CRYPTO_MSG_BASE] = { .doit = crypto_add_alg},
-       [CRYPTO_MSG_DELALG      - CRYPTO_MSG_BASE] = { .doit = crypto_del_alg},
-       [CRYPTO_MSG_UPDATEALG   - CRYPTO_MSG_BASE] = { .doit = crypto_update_alg},
-       [CRYPTO_MSG_GETALG      - CRYPTO_MSG_BASE] = { .doit = crypto_report,
-                                                      .dump = crypto_dump_report,
-                                                      .done = crypto_dump_report_done},
-       [CRYPTO_MSG_DELRNG      - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
-       [CRYPTO_MSG_GETSTAT     - CRYPTO_MSG_BASE] = { .doit = crypto_reportstat},
-};
-
-static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
-                              struct netlink_ext_ack *extack)
-{
-       struct net *net = sock_net(skb->sk);
-       struct nlattr *attrs[CRYPTOCFGA_MAX+1];
-       const struct crypto_link *link;
-       int type, err;
-
-       type = nlh->nlmsg_type;
-       if (type > CRYPTO_MSG_MAX)
-               return -EINVAL;
-
-       type -= CRYPTO_MSG_BASE;
-       link = &crypto_dispatch[type];
-
-       if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
-           (nlh->nlmsg_flags & NLM_F_DUMP))) {
-               struct crypto_alg *alg;
-               unsigned long dump_alloc = 0;
-
-               if (link->dump == NULL)
-                       return -EINVAL;
-
-               down_read(&crypto_alg_sem);
-               list_for_each_entry(alg, &crypto_alg_list, cra_list)
-                       dump_alloc += CRYPTO_REPORT_MAXSIZE;
-               up_read(&crypto_alg_sem);
-
-               {
-                       struct netlink_dump_control c = {
-                               .dump = link->dump,
-                               .done = link->done,
-                               .min_dump_alloc = min(dump_alloc, 65535UL),
-                       };
-                       err = netlink_dump_start(net->crypto_nlsk, skb, nlh, &c);
-               }
-
-               return err;
-       }
-
-       err = nlmsg_parse_deprecated(nlh, crypto_msg_min[type], attrs,
-                                    CRYPTOCFGA_MAX, crypto_policy, extack);
-       if (err < 0)
-               return err;
-
-       if (link->doit == NULL)
-               return -EINVAL;
-
-       return link->doit(skb, nlh, attrs);
-}
-
-static void crypto_netlink_rcv(struct sk_buff *skb)
-{
-       mutex_lock(&crypto_cfg_mutex);
-       netlink_rcv_skb(skb, &crypto_user_rcv_msg);
-       mutex_unlock(&crypto_cfg_mutex);
-}
-
-static int __net_init crypto_netlink_init(struct net *net)
-{
-       struct netlink_kernel_cfg cfg = {
-               .input  = crypto_netlink_rcv,
-       };
-
-       net->crypto_nlsk = netlink_kernel_create(net, NETLINK_CRYPTO, &cfg);
-       return net->crypto_nlsk == NULL ? -ENOMEM : 0;
-}
-
-static void __net_exit crypto_netlink_exit(struct net *net)
-{
-       netlink_kernel_release(net->crypto_nlsk);
-       net->crypto_nlsk = NULL;
-}
-
-static struct pernet_operations crypto_netlink_net_ops = {
-       .init = crypto_netlink_init,
-       .exit = crypto_netlink_exit,
-};
-
-static int __init crypto_user_init(void)
-{
-       return register_pernet_subsys(&crypto_netlink_net_ops);
-}
-
-static void __exit crypto_user_exit(void)
-{
-       unregister_pernet_subsys(&crypto_netlink_net_ops);
-}
-
-module_init(crypto_user_init);
-module_exit(crypto_user_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
-MODULE_DESCRIPTION("Crypto userspace configuration API");
-MODULE_ALIAS("net-pf-16-proto-21");
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c
new file mode 100644
index 0000000..3fa20f1
--- /dev/null
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Crypto user configuration API.
+ *
+ * Copyright (C) 2011 secunet Security Networks AG
+ * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/cryptouser.h>
+#include <linux/sched.h>
+#include <linux/security.h>
+#include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/rng.h>
+#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/cryptouser.h>
+
+#include "internal.h"
+
+#define null_terminated(x)     (strnlen(x, sizeof(x)) < sizeof(x))
+
+static DEFINE_MUTEX(crypto_cfg_mutex);
+
+struct crypto_dump_info {
+       struct sk_buff *in_skb;
+       struct sk_buff *out_skb;
+       u32 nlmsg_seq;
+       u16 nlmsg_flags;
+};
+
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
+{
+       struct crypto_alg *q, *alg = NULL;
+
+       down_read(&crypto_alg_sem);
+
+       list_for_each_entry(q, &crypto_alg_list, cra_list) {
+               int match = 0;
+
+               if (crypto_is_larval(q))
+                       continue;
+
+               if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
+                       continue;
+
+               if (strlen(p->cru_driver_name))
+                       match = !strcmp(q->cra_driver_name,
+                                       p->cru_driver_name);
+               else if (!exact)
+                       match = !strcmp(q->cra_name, p->cru_name);
+
+               if (!match)
+                       continue;
+
+               if (unlikely(!crypto_mod_get(q)))
+                       continue;
+
+               alg = q;
+               break;
+       }
+
+       up_read(&crypto_alg_sem);
+
+       return alg;
+}
+
+static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct crypto_report_cipher rcipher;
+
+       memset(&rcipher, 0, sizeof(rcipher));
+
+       strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+       rcipher.blocksize = alg->cra_blocksize;
+       rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
+       rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+
+       return nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
+                      sizeof(rcipher), &rcipher);
+}
+
+static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct crypto_report_comp rcomp;
+
+       memset(&rcomp, 0, sizeof(rcomp));
+
+       strscpy(rcomp.type, "compression", sizeof(rcomp.type));
+
+       return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
+}
+
+static int crypto_report_one(struct crypto_alg *alg,
+                            struct crypto_user_alg *ualg, struct sk_buff *skb)
+{
+       memset(ualg, 0, sizeof(*ualg));
+
+       strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+       strscpy(ualg->cru_driver_name, alg->cra_driver_name,
+               sizeof(ualg->cru_driver_name));
+       strscpy(ualg->cru_module_name, module_name(alg->cra_module),
+               sizeof(ualg->cru_module_name));
+
+       ualg->cru_type = 0;
+       ualg->cru_mask = 0;
+       ualg->cru_flags = alg->cra_flags;
+       ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
+
+       if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
+               goto nla_put_failure;
+       if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
+               struct crypto_report_larval rl;
+
+               memset(&rl, 0, sizeof(rl));
+               strscpy(rl.type, "larval", sizeof(rl.type));
+               if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(rl), &rl))
+                       goto nla_put_failure;
+               goto out;
+       }
+
+       if (alg->cra_type && alg->cra_type->report) {
+               if (alg->cra_type->report(skb, alg))
+                       goto nla_put_failure;
+
+               goto out;
+       }
+
+       switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
+       case CRYPTO_ALG_TYPE_CIPHER:
+               if (crypto_report_cipher(skb, alg))
+                       goto nla_put_failure;
+
+               break;
+       case CRYPTO_ALG_TYPE_COMPRESS:
+               if (crypto_report_comp(skb, alg))
+                       goto nla_put_failure;
+
+               break;
+       }
+
+out:
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int crypto_report_alg(struct crypto_alg *alg,
+                            struct crypto_dump_info *info)
+{
+       struct sk_buff *in_skb = info->in_skb;
+       struct sk_buff *skb = info->out_skb;
+       struct nlmsghdr *nlh;
+       struct crypto_user_alg *ualg;
+       int err = 0;
+
+       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
+                       CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
+       if (!nlh) {
+               err = -EMSGSIZE;
+               goto out;
+       }
+
+       ualg = nlmsg_data(nlh);
+
+       err = crypto_report_one(alg, ualg, skb);
+       if (err) {
+               nlmsg_cancel(skb, nlh);
+               goto out;
+       }
+
+       nlmsg_end(skb, nlh);
+
+out:
+       return err;
+}
+
+static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+                        struct nlattr **attrs)
+{
+       struct net *net = sock_net(in_skb->sk);
+       struct crypto_user_alg *p = nlmsg_data(in_nlh);
+       struct crypto_alg *alg;
+       struct sk_buff *skb;
+       struct crypto_dump_info info;
+       int err;
+
+       if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+               return -EINVAL;
+
+       alg = crypto_alg_match(p, 0);
+       if (!alg)
+               return -ENOENT;
+
+       err = -ENOMEM;
+       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!skb)
+               goto drop_alg;
+
+       info.in_skb = in_skb;
+       info.out_skb = skb;
+       info.nlmsg_seq = in_nlh->nlmsg_seq;
+       info.nlmsg_flags = 0;
+
+       err = crypto_report_alg(alg, &info);
+
+drop_alg:
+       crypto_mod_put(alg);
+
+       if (err) {
+               kfree_skb(skb);
+               return err;
+       }
+
+       return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
+}
+
+static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const size_t start_pos = cb->args[0];
+       size_t pos = 0;
+       struct crypto_dump_info info;
+       struct crypto_alg *alg;
+       int res;
+
+       info.in_skb = cb->skb;
+       info.out_skb = skb;
+       info.nlmsg_seq = cb->nlh->nlmsg_seq;
+       info.nlmsg_flags = NLM_F_MULTI;
+
+       down_read(&crypto_alg_sem);
+       list_for_each_entry(alg, &crypto_alg_list, cra_list) {
+               if (pos >= start_pos) {
+                       res = crypto_report_alg(alg, &info);
+                       if (res == -EMSGSIZE)
+                               break;
+                       if (res)
+                               goto out;
+               }
+               pos++;
+       }
+       cb->args[0] = pos;
+       res = skb->len;
+out:
+       up_read(&crypto_alg_sem);
+       return res;
+}
+
+static int crypto_dump_report_done(struct netlink_callback *cb)
+{
+       return 0;
+}
+
+static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
+                            struct nlattr **attrs)
+{
+       struct crypto_alg *alg;
+       struct crypto_user_alg *p = nlmsg_data(nlh);
+       struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
+       LIST_HEAD(list);
+
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+               return -EINVAL;
+
+       if (priority && !strlen(p->cru_driver_name))
+               return -EINVAL;
+
+       alg = crypto_alg_match(p, 1);
+       if (!alg)
+               return -ENOENT;
+
+       down_write(&crypto_alg_sem);
+
+       crypto_remove_spawns(alg, &list, NULL);
+
+       if (priority)
+               alg->cra_priority = nla_get_u32(priority);
+
+       up_write(&crypto_alg_sem);
+
+       crypto_mod_put(alg);
+       crypto_remove_final(&list);
+
+       return 0;
+}
+
+static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
+                         struct nlattr **attrs)
+{
+       struct crypto_alg *alg;
+       struct crypto_user_alg *p = nlmsg_data(nlh);
+       int err;
+
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+               return -EINVAL;
+
+       alg = crypto_alg_match(p, 1);
+       if (!alg)
+               return -ENOENT;
+
+       /* We can not unregister core algorithms such as aes-generic.
+        * We would loose the reference in the crypto_alg_list to this algorithm
+        * if we try to unregister. Unregistering such an algorithm without
+        * removing the module is not possible, so we restrict to crypto
+        * instances that are build from templates. */
+       err = -EINVAL;
+       if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
+               goto drop_alg;
+
+       err = -EBUSY;
+       if (refcount_read(&alg->cra_refcnt) > 2)
+               goto drop_alg;
+
+       crypto_unregister_instance((struct crypto_instance *)alg);
+       err = 0;
+
+drop_alg:
+       crypto_mod_put(alg);
+       return err;
+}
+
+static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
+                         struct nlattr **attrs)
+{
+       int exact = 0;
+       const char *name;
+       struct crypto_alg *alg;
+       struct crypto_user_alg *p = nlmsg_data(nlh);
+       struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
+
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+               return -EINVAL;
+
+       if (strlen(p->cru_driver_name))
+               exact = 1;
+
+       if (priority && !exact)
+               return -EINVAL;
+
+       alg = crypto_alg_match(p, exact);
+       if (alg) {
+               crypto_mod_put(alg);
+               return -EEXIST;
+       }
+
+       if (strlen(p->cru_driver_name))
+               name = p->cru_driver_name;
+       else
+               name = p->cru_name;
+
+       alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
+       if (IS_ERR(alg))
+               return PTR_ERR(alg);
+
+       down_write(&crypto_alg_sem);
+
+       if (priority)
+               alg->cra_priority = nla_get_u32(priority);
+
+       up_write(&crypto_alg_sem);
+
+       crypto_mod_put(alg);
+
+       return 0;
+}
+
+static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
+                         struct nlattr **attrs)
+{
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
+               return -EPERM;
+       return crypto_del_default_rng();
+}
+
+#define MSGSIZE(type) sizeof(struct type)
+
+static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
+       [CRYPTO_MSG_NEWALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+       [CRYPTO_MSG_DELALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+       [CRYPTO_MSG_UPDATEALG   - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+       [CRYPTO_MSG_GETALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+       [CRYPTO_MSG_DELRNG      - CRYPTO_MSG_BASE] = 0,
+       [CRYPTO_MSG_GETSTAT     - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+};
+
+static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
+       [CRYPTOCFGA_PRIORITY_VAL]   = { .type = NLA_U32},
+};
+
+#undef MSGSIZE
+
+static const struct crypto_link {
+       int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
+       int (*dump)(struct sk_buff *, struct netlink_callback *);
+       int (*done)(struct netlink_callback *);
+} crypto_dispatch[CRYPTO_NR_MSGTYPES] = {
+       [CRYPTO_MSG_NEWALG      - CRYPTO_MSG_BASE] = { .doit = crypto_add_alg},
+       [CRYPTO_MSG_DELALG      - CRYPTO_MSG_BASE] = { .doit = crypto_del_alg},
+       [CRYPTO_MSG_UPDATEALG   - CRYPTO_MSG_BASE] = { .doit = crypto_update_alg},
+       [CRYPTO_MSG_GETALG      - CRYPTO_MSG_BASE] = { .doit = crypto_report,
+                                                      .dump = crypto_dump_report,
+                                                      .done = crypto_dump_report_done},
+       [CRYPTO_MSG_DELRNG      - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
+       [CRYPTO_MSG_GETSTAT     - CRYPTO_MSG_BASE] = { .doit = crypto_reportstat},
+};
+
+static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+                              struct netlink_ext_ack *extack)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nlattr *attrs[CRYPTOCFGA_MAX+1];
+       const struct crypto_link *link;
+       int type, err;
+
+       type = nlh->nlmsg_type;
+       if (type > CRYPTO_MSG_MAX)
+               return -EINVAL;
+
+       type -= CRYPTO_MSG_BASE;
+       link = &crypto_dispatch[type];
+
+       if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
+           (nlh->nlmsg_flags & NLM_F_DUMP))) {
+               struct crypto_alg *alg;
+               unsigned long dump_alloc = 0;
+
+               if (link->dump == NULL)
+                       return -EINVAL;
+
+               down_read(&crypto_alg_sem);
+               list_for_each_entry(alg, &crypto_alg_list, cra_list)
+                       dump_alloc += CRYPTO_REPORT_MAXSIZE;
+               up_read(&crypto_alg_sem);
+
+               {
+                       struct netlink_dump_control c = {
+                               .dump = link->dump,
+                               .done = link->done,
+                               .min_dump_alloc = min(dump_alloc, 65535UL),
+                       };
+                       err = netlink_dump_start(net->crypto_nlsk, skb, nlh, &c);
+               }
+
+               return err;
+       }
+
+       err = nlmsg_parse_deprecated(nlh, crypto_msg_min[type], attrs,
+                                    CRYPTOCFGA_MAX, crypto_policy, extack);
+       if (err < 0)
+               return err;
+
+       if (link->doit == NULL)
+               return -EINVAL;
+
+       return link->doit(skb, nlh, attrs);
+}
+
+static void crypto_netlink_rcv(struct sk_buff *skb)
+{
+       mutex_lock(&crypto_cfg_mutex);
+       netlink_rcv_skb(skb, &crypto_user_rcv_msg);
+       mutex_unlock(&crypto_cfg_mutex);
+}
+
+static int __net_init crypto_netlink_init(struct net *net)
+{
+       struct netlink_kernel_cfg cfg = {
+               .input  = crypto_netlink_rcv,
+       };
+
+       net->crypto_nlsk = netlink_kernel_create(net, NETLINK_CRYPTO, &cfg);
+       return net->crypto_nlsk == NULL ? -ENOMEM : 0;
+}
+
+static void __net_exit crypto_netlink_exit(struct net *net)
+{
+       netlink_kernel_release(net->crypto_nlsk);
+       net->crypto_nlsk = NULL;
+}
+
+static struct pernet_operations crypto_netlink_net_ops = {
+       .init = crypto_netlink_init,
+       .exit = crypto_netlink_exit,
+};
+
+static int __init crypto_user_init(void)
+{
+       return register_pernet_subsys(&crypto_netlink_net_ops);
+}
+
+static void __exit crypto_user_exit(void)
+{
+       unregister_pernet_subsys(&crypto_netlink_net_ops);
+}
+
+module_init(crypto_user_init);
+module_exit(crypto_user_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
+MODULE_DESCRIPTION("Crypto userspace configuration API");
+MODULE_ALIAS("net-pf-16-proto-21");
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
new file mode 100644
index 0000000..d4f3d39
--- /dev/null
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Crypto user configuration API.
+ *
+ * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com>
+ *
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/cryptouser.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+
+#define null_terminated(x)     (strnlen(x, sizeof(x)) < sizeof(x))
+
+struct crypto_dump_info {
+       struct sk_buff *in_skb;
+       struct sk_buff *out_skb;
+       u32 nlmsg_seq;
+       u16 nlmsg_flags;
+};
+
+static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct crypto_stat_cipher rcipher;
+
+       memset(&rcipher, 0, sizeof(rcipher));
+
+       strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+       return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
+}
+
+static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct crypto_stat_compress rcomp;
+
+       memset(&rcomp, 0, sizeof(rcomp));
+
+       strscpy(rcomp.type, "compression", sizeof(rcomp.type));
+
+       return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
+}
+
+static int crypto_reportstat_one(struct crypto_alg *alg,
+                                struct crypto_user_alg *ualg,
+                                struct sk_buff *skb)
+{
+       memset(ualg, 0, sizeof(*ualg));
+
+       strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+       strscpy(ualg->cru_driver_name, alg->cra_driver_name,
+               sizeof(ualg->cru_driver_name));
+       strscpy(ualg->cru_module_name, module_name(alg->cra_module),
+               sizeof(ualg->cru_module_name));
+
+       ualg->cru_type = 0;
+       ualg->cru_mask = 0;
+       ualg->cru_flags = alg->cra_flags;
+       ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
+
+       if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
+               goto nla_put_failure;
+       if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
+               struct crypto_stat_larval rl;
+
+               memset(&rl, 0, sizeof(rl));
+               strscpy(rl.type, "larval", sizeof(rl.type));
+               if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl))
+                       goto nla_put_failure;
+               goto out;
+       }
+
+       if (alg->cra_type && alg->cra_type->report_stat) {
+               if (alg->cra_type->report_stat(skb, alg))
+                       goto nla_put_failure;
+               goto out;
+       }
+
+       switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
+       case CRYPTO_ALG_TYPE_CIPHER:
+               if (crypto_report_cipher(skb, alg))
+                       goto nla_put_failure;
+               break;
+       case CRYPTO_ALG_TYPE_COMPRESS:
+               if (crypto_report_comp(skb, alg))
+                       goto nla_put_failure;
+               break;
+       default:
+               pr_err("ERROR: Unhandled alg %d in %s\n",
+                      alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
+                      __func__);
+       }
+
+out:
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int crypto_reportstat_alg(struct crypto_alg *alg,
+                                struct crypto_dump_info *info)
+{
+       struct sk_buff *in_skb = info->in_skb;
+       struct sk_buff *skb = info->out_skb;
+       struct nlmsghdr *nlh;
+       struct crypto_user_alg *ualg;
+       int err = 0;
+
+       nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
+                       CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
+       if (!nlh) {
+               err = -EMSGSIZE;
+               goto out;
+       }
+
+       ualg = nlmsg_data(nlh);
+
+       err = crypto_reportstat_one(alg, ualg, skb);
+       if (err) {
+               nlmsg_cancel(skb, nlh);
+               goto out;
+       }
+
+       nlmsg_end(skb, nlh);
+
+out:
+       return err;
+}
+
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+                     struct nlattr **attrs)
+{
+       struct net *net = sock_net(in_skb->sk);
+       struct crypto_user_alg *p = nlmsg_data(in_nlh);
+       struct crypto_alg *alg;
+       struct sk_buff *skb;
+       struct crypto_dump_info info;
+       int err;
+
+       if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+               return -EINVAL;
+
+       alg = crypto_alg_match(p, 0);
+       if (!alg)
+               return -ENOENT;
+
+       err = -ENOMEM;
+       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+       if (!skb)
+               goto drop_alg;
+
+       info.in_skb = in_skb;
+       info.out_skb = skb;
+       info.nlmsg_seq = in_nlh->nlmsg_seq;
+       info.nlmsg_flags = 0;
+
+       err = crypto_reportstat_alg(alg, &info);
+
+drop_alg:
+       crypto_mod_put(alg);
+
+       if (err) {
+               kfree_skb(skb);
+               return err;
+       }
+
+       return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
+}
+
+MODULE_LICENSE("GPL");
index cf9aee07f77d4985929df7189a1dff2b9eb73fc3..93f6ba0df263e51e8b4c35927c2873cdb565c138 100644 (file)
@@ -8,9 +8,39 @@
 #define _LOCAL_CRYPTO_HASH_H
 
 #include <crypto/internal/hash.h>
+#include <linux/cryptouser.h>
 
 #include "internal.h"
 
+static inline struct crypto_istat_hash *hash_get_stat(
+       struct hash_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
+static inline int crypto_hash_report_stat(struct sk_buff *skb,
+                                         struct crypto_alg *alg,
+                                         const char *type)
+{
+       struct hash_alg_common *halg = __crypto_hash_alg_common(alg);
+       struct crypto_istat_hash *istat = hash_get_stat(halg);
+       struct crypto_stat_hash rhash;
+
+       memset(&rhash, 0, sizeof(rhash));
+
+       strscpy(rhash.type, type, sizeof(rhash.type));
+
+       rhash.stat_hash_cnt = atomic64_read(&istat->hash_cnt);
+       rhash.stat_hash_tlen = atomic64_read(&istat->hash_tlen);
+       rhash.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
+}
+
 extern const struct crypto_type crypto_shash_type;
 
 int hash_prepare_alg(struct hash_alg_common *alg);
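
hash_get_stat() is the usual stub pattern: with CONFIG_CRYPTO_STATS disabled it returns NULL, and every caller first tests IS_ENABLED(CONFIG_CRYPTO_STATS), so the NULL is never dereferenced and the compiler drops the dead branch. A standalone userspace sketch of that pattern; CONFIG_EXAMPLE_STATS, the simplified IS_ENABLED() and the example_* types are stand-ins, not kernel code.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define CONFIG_EXAMPLE_STATS 1		/* flip to 0 to compile the stats out */
#define IS_ENABLED(option) (option)	/* simplified stand-in for the kernel macro */

struct example_istat {
	atomic_long hash_cnt;
	atomic_long hash_tlen;
};

struct example_alg {
#if CONFIG_EXAMPLE_STATS
	struct example_istat stat;
#endif
	const char *name;
};

static inline struct example_istat *example_get_stat(struct example_alg *alg)
{
#if CONFIG_EXAMPLE_STATS
	return &alg->stat;
#else
	return NULL;	/* never dereferenced: callers test IS_ENABLED() first */
#endif
}

static void example_hash(struct example_alg *alg, size_t len)
{
	if (IS_ENABLED(CONFIG_EXAMPLE_STATS)) {
		struct example_istat *istat = example_get_stat(alg);

		atomic_fetch_add(&istat->hash_cnt, 1);
		atomic_fetch_add(&istat->hash_tlen, (long)len);
	}
	/* ... the actual hashing would happen here ... */
}

int main(void)
{
	static struct example_alg alg = { .name = "demo" };

	example_hash(&alg, 64);
	example_hash(&alg, 4096);

	if (IS_ENABLED(CONFIG_EXAMPLE_STATS)) {
		struct example_istat *istat = example_get_stat(&alg);

		printf("%s: %ld calls, %ld bytes\n", alg.name,
		       atomic_load(&istat->hash_cnt),
		       atomic_load(&istat->hash_tlen));
	}
	return 0;
}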
index ecc63a1a948dfebb1ba751aa129b47012e7e01c9..33d44e59387ff3370d4dd2a7f5ae036163aea39f 100644 (file)
@@ -66,6 +66,29 @@ static void crypto_kpp_free_instance(struct crypto_instance *inst)
        kpp->free(kpp);
 }
 
+static int __maybe_unused crypto_kpp_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct kpp_alg *kpp = __crypto_kpp_alg(alg);
+       struct crypto_istat_kpp *istat;
+       struct crypto_stat_kpp rkpp;
+
+       istat = kpp_get_stat(kpp);
+
+       memset(&rkpp, 0, sizeof(rkpp));
+
+       strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+       rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt);
+       rkpp.stat_generate_public_key_cnt =
+               atomic64_read(&istat->generate_public_key_cnt);
+       rkpp.stat_compute_shared_secret_cnt =
+               atomic64_read(&istat->compute_shared_secret_cnt);
+       rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
+}
+
 static const struct crypto_type crypto_kpp_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_kpp_init_tfm,
@@ -75,6 +98,9 @@ static const struct crypto_type crypto_kpp_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_kpp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_kpp_report_stat,
 #endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
@@ -105,11 +131,15 @@ EXPORT_SYMBOL_GPL(crypto_has_kpp);
 
 static void kpp_prepare_alg(struct kpp_alg *alg)
 {
+       struct crypto_istat_kpp *istat = kpp_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
        base->cra_type = &crypto_kpp_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
 }
 
 int crypto_register_kpp(struct kpp_alg *alg)
index 0a800292ca4e31b02ebeaff2984f452f9584c64a..0b6dd8aa21f2edace686fb5531705698e7acc18d 100644 (file)
@@ -29,6 +29,25 @@ static inline struct lskcipher_alg *__crypto_lskcipher_alg(
        return container_of(alg, struct lskcipher_alg, co.base);
 }
 
+static inline struct crypto_istat_cipher *lskcipher_get_stat(
+       struct lskcipher_alg *alg)
+{
+       return skcipher_get_stat_common(&alg->co);
+}
+
+static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err)
+{
+       struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
+
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err)
+               atomic64_inc(&istat->err_cnt);
+
+       return err;
+}
+
 static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
                                      const u8 *key, unsigned int keylen)
 {
@@ -128,13 +147,20 @@ static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
                                               u32 flags))
 {
        unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
+       struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
+       int ret;
 
        if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
-           alignmask)
-               return crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
-                                                       crypt);
+           alignmask) {
+               ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
+                                                      crypt);
+               goto out;
+       }
 
-       return crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
+       ret = crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
+
+out:
+       return crypto_lskcipher_errstat(alg, ret);
 }
 
 int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
@@ -142,6 +168,13 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
 {
        struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
 
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
+
+               atomic64_inc(&istat->encrypt_cnt);
+               atomic64_add(len, &istat->encrypt_tlen);
+       }
+
        return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
 }
 EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
@@ -151,6 +184,13 @@ int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
 {
        struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
 
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);
+
+               atomic64_inc(&istat->decrypt_cnt);
+               atomic64_add(len, &istat->decrypt_tlen);
+       }
+
        return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
 }
 EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);
@@ -282,6 +322,28 @@ static int __maybe_unused crypto_lskcipher_report(
                       sizeof(rblkcipher), &rblkcipher);
 }
 
+static int __maybe_unused crypto_lskcipher_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
+       struct crypto_istat_cipher *istat;
+       struct crypto_stat_cipher rcipher;
+
+       istat = lskcipher_get_stat(skcipher);
+
+       memset(&rcipher, 0, sizeof(rcipher));
+
+       strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+       rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+       rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+       rcipher.stat_decrypt_cnt =  atomic64_read(&istat->decrypt_cnt);
+       rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+       rcipher.stat_err_cnt =  atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
+}
+
 static const struct crypto_type crypto_lskcipher_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_lskcipher_init_tfm,
@@ -291,6 +353,9 @@ static const struct crypto_type crypto_lskcipher_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_lskcipher_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_lskcipher_report_stat,
 #endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
index 9d8804e464226d1c7ec4e1d694bfef222ca9dde1..279dffdebf59815b5eccb59d69118c60a3be7205 100644 (file)
@@ -30,24 +30,30 @@ static int crypto_default_rng_refcnt;
 
 int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
 {
+       struct rng_alg *alg = crypto_rng_alg(tfm);
        u8 *buf = NULL;
        int err;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&rng_get_stat(alg)->seed_cnt);
+
        if (!seed && slen) {
                buf = kmalloc(slen, GFP_KERNEL);
+               err = -ENOMEM;
                if (!buf)
-                       return -ENOMEM;
+                       goto out;
 
                err = get_random_bytes_wait(buf, slen);
                if (err)
-                       goto out;
+                       goto free_buf;
                seed = buf;
        }
 
-       err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
-out:
+       err = alg->seed(tfm, seed, slen);
+free_buf:
        kfree_sensitive(buf);
-       return err;
+out:
+       return crypto_rng_errstat(alg, err);
 }
 EXPORT_SYMBOL_GPL(crypto_rng_reset);
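
The crypto_rng_reset() rework above is mostly about funnelling every exit through crypto_rng_errstat(); the ordered free_buf/out labels are the standard C cleanup idiom for that. A standalone sketch of the idiom; the example_* names are stand-ins, not kernel code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long example_err_cnt;

/* Every exit path runs through this, mirroring crypto_rng_errstat(). */
static int example_errstat(int err)
{
	if (err)
		example_err_cnt++;
	return err;
}

static int example_get_entropy(unsigned char *buf, size_t len)
{
	memset(buf, 0xa5, len);			/* pretend entropy source */
	return 0;
}

static int example_do_seed(const unsigned char *seed, size_t slen)
{
	return (seed || !slen) ? 0 : -EINVAL;	/* stand-in for alg->seed() */
}

static int example_reset(const unsigned char *seed, size_t slen)
{
	unsigned char *buf = NULL;
	int err;

	if (!seed && slen) {
		err = -ENOMEM;
		buf = malloc(slen);
		if (!buf)
			goto out;

		err = example_get_entropy(buf, slen);
		if (err)
			goto free_buf;
		seed = buf;
	}

	err = example_do_seed(seed, slen);
free_buf:
	free(buf);		/* free(NULL) is a no-op */
out:
	return example_errstat(err);
}

int main(void)
{
	printf("reset: %d, errors so far: %ld\n",
	       example_reset(NULL, 16), example_err_cnt);
	return 0;
}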
 
@@ -85,6 +91,27 @@ static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
        seq_printf(m, "seedsize     : %u\n", seedsize(alg));
 }
 
+static int __maybe_unused crypto_rng_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct rng_alg *rng = __crypto_rng_alg(alg);
+       struct crypto_istat_rng *istat;
+       struct crypto_stat_rng rrng;
+
+       istat = rng_get_stat(rng);
+
+       memset(&rrng, 0, sizeof(rrng));
+
+       strscpy(rrng.type, "rng", sizeof(rrng.type));
+
+       rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt);
+       rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen);
+       rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt);
+       rrng.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
+}
+
 static const struct crypto_type crypto_rng_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_rng_init_tfm,
@@ -93,6 +120,9 @@ static const struct crypto_type crypto_rng_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_rng_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_rng_report_stat,
 #endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
@@ -169,6 +199,7 @@ EXPORT_SYMBOL_GPL(crypto_del_default_rng);
 
 int crypto_register_rng(struct rng_alg *alg)
 {
+       struct crypto_istat_rng *istat = rng_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
        if (alg->seedsize > PAGE_SIZE / 8)
@@ -178,6 +209,9 @@ int crypto_register_rng(struct rng_alg *alg)
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
+
        return crypto_register_alg(base);
 }
 EXPORT_SYMBOL_GPL(crypto_register_rng);
index 93daf3eb98429bd0ef148874c7b9f62d33a07111..60bbb7ea406028835bf59680fff68aaf75db330a 100644 (file)
@@ -270,6 +270,9 @@ static const struct crypto_type crypto_scomp_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_scomp_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_acomp_report_stat,
 #endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
@@ -279,10 +282,11 @@ static const struct crypto_type crypto_scomp_type = {
 
 int crypto_register_scomp(struct scomp_alg *alg)
 {
-       struct crypto_alg *base = &alg->base;
+       struct crypto_alg *base = &alg->calg.base;
+
+       comp_prepare_alg(&alg->calg);
 
        base->cra_type = &crypto_scomp_type;
-       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
 
        return crypto_register_alg(base);
index 0ffe671b519e107c985a84ca82d10f38ed660462..c3f7f6a2528036e2ec836bbce9344f8bca70aec8 100644 (file)
 
 #include "hash.h"
 
+static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
+{
+       return hash_get_stat(&alg->halg);
+}
+
+static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
+{
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS) && err)
+               atomic64_inc(&shash_get_stat(alg)->err_cnt);
+       return err;
+}
+
 int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
                    unsigned int keylen)
 {
@@ -49,13 +61,29 @@ EXPORT_SYMBOL_GPL(crypto_shash_setkey);
 int crypto_shash_update(struct shash_desc *desc, const u8 *data,
                        unsigned int len)
 {
-       return crypto_shash_alg(desc->tfm)->update(desc, data, len);
+       struct shash_alg *shash = crypto_shash_alg(desc->tfm);
+       int err;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_add(len, &shash_get_stat(shash)->hash_tlen);
+
+       err = shash->update(desc, data, len);
+
+       return crypto_shash_errstat(shash, err);
 }
 EXPORT_SYMBOL_GPL(crypto_shash_update);
 
 int crypto_shash_final(struct shash_desc *desc, u8 *out)
 {
-       return crypto_shash_alg(desc->tfm)->final(desc, out);
+       struct shash_alg *shash = crypto_shash_alg(desc->tfm);
+       int err;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&shash_get_stat(shash)->hash_cnt);
+
+       err = shash->final(desc, out);
+
+       return crypto_shash_errstat(shash, err);
 }
 EXPORT_SYMBOL_GPL(crypto_shash_final);
 
@@ -71,7 +99,20 @@ static int shash_default_finup(struct shash_desc *desc, const u8 *data,
 int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
                       unsigned int len, u8 *out)
 {
-       return crypto_shash_alg(desc->tfm)->finup(desc, data, len, out);
+       struct crypto_shash *tfm = desc->tfm;
+       struct shash_alg *shash = crypto_shash_alg(tfm);
+       int err;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_hash *istat = shash_get_stat(shash);
+
+               atomic64_inc(&istat->hash_cnt);
+               atomic64_add(len, &istat->hash_tlen);
+       }
+
+       err = shash->finup(desc, data, len, out);
+
+       return crypto_shash_errstat(shash, err);
 }
 EXPORT_SYMBOL_GPL(crypto_shash_finup);
 
@@ -88,11 +129,22 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
                        unsigned int len, u8 *out)
 {
        struct crypto_shash *tfm = desc->tfm;
+       struct shash_alg *shash = crypto_shash_alg(tfm);
+       int err;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_hash *istat = shash_get_stat(shash);
+
+               atomic64_inc(&istat->hash_cnt);
+               atomic64_add(len, &istat->hash_tlen);
+       }
 
        if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-               return -ENOKEY;
+               err = -ENOKEY;
+       else
+               err = shash->digest(desc, data, len, out);
 
-       return crypto_shash_alg(desc->tfm)->digest(desc, data, len, out);
+       return crypto_shash_errstat(shash, err);
 }
 EXPORT_SYMBOL_GPL(crypto_shash_digest);
 
@@ -213,6 +265,12 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
        seq_printf(m, "digestsize   : %u\n", salg->digestsize);
 }
 
+static int __maybe_unused crypto_shash_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       return crypto_hash_report_stat(skb, alg, "shash");
+}
+
 const struct crypto_type crypto_shash_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_shash_init_tfm,
@@ -222,6 +280,9 @@ const struct crypto_type crypto_shash_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_shash_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_shash_report_stat,
 #endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
@@ -289,6 +350,7 @@ EXPORT_SYMBOL_GPL(crypto_clone_shash);
 
 int hash_prepare_alg(struct hash_alg_common *alg)
 {
+       struct crypto_istat_hash *istat = hash_get_stat(alg);
        struct crypto_alg *base = &alg->base;
 
        if (alg->digestsize > HASH_MAX_DIGESTSIZE)
@@ -300,6 +362,9 @@ int hash_prepare_alg(struct hash_alg_common *alg)
 
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
+
        return 0;
 }
 
index 7645bedf3a1fd4b6703be78336132b218fac6c4a..224c470192977b49fa42eeea9e69a48423a31652 100644 (file)
@@ -45,6 +45,16 @@ static int __maybe_unused crypto_sig_report(struct sk_buff *skb,
        return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig);
 }
 
+static int __maybe_unused crypto_sig_report_stat(struct sk_buff *skb,
+                                                struct crypto_alg *alg)
+{
+       struct crypto_stat_akcipher rsig = {};
+
+       strscpy(rsig.type, "sig", sizeof(rsig.type));
+
+       return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rsig), &rsig);
+}
+
 static const struct crypto_type crypto_sig_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_sig_init_tfm,
@@ -53,6 +63,9 @@ static const struct crypto_type crypto_sig_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_sig_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_sig_report_stat,
 #endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_SIG_MASK,
index ceed7f33a67ba5755244bcbbbc78fa0945c02353..bc70e159d27df586166f1975e5696c48760f39a1 100644 (file)
@@ -89,6 +89,25 @@ static inline struct skcipher_alg *__crypto_skcipher_alg(
        return container_of(alg, struct skcipher_alg, base);
 }
 
+static inline struct crypto_istat_cipher *skcipher_get_stat(
+       struct skcipher_alg *alg)
+{
+       return skcipher_get_stat_common(&alg->co);
+}
+
+static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
+{
+       struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&istat->err_cnt);
+
+       return err;
+}
+
 static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
        u8 *addr;
@@ -635,12 +654,23 @@ int crypto_skcipher_encrypt(struct skcipher_request *req)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+       int ret;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+               atomic64_inc(&istat->encrypt_cnt);
+               atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+       }
 
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-               return -ENOKEY;
-       if (alg->co.base.cra_type != &crypto_skcipher_type)
-               return crypto_lskcipher_encrypt_sg(req);
-       return alg->encrypt(req);
+               ret = -ENOKEY;
+       else if (alg->co.base.cra_type != &crypto_skcipher_type)
+               ret = crypto_lskcipher_encrypt_sg(req);
+       else
+               ret = alg->encrypt(req);
+
+       return crypto_skcipher_errstat(alg, ret);
 }
 EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
 
@@ -648,12 +678,23 @@ int crypto_skcipher_decrypt(struct skcipher_request *req)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+       int ret;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+               atomic64_inc(&istat->decrypt_cnt);
+               atomic64_add(req->cryptlen, &istat->decrypt_tlen);
+       }
 
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-               return -ENOKEY;
-       if (alg->co.base.cra_type != &crypto_skcipher_type)
-               return crypto_lskcipher_decrypt_sg(req);
-       return alg->decrypt(req);
+               ret = -ENOKEY;
+       else if (alg->co.base.cra_type != &crypto_skcipher_type)
+               ret = crypto_lskcipher_decrypt_sg(req);
+       else
+               ret = alg->decrypt(req);
+
+       return crypto_skcipher_errstat(alg, ret);
 }
 EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
 
@@ -805,6 +846,28 @@ static int __maybe_unused crypto_skcipher_report(
                       sizeof(rblkcipher), &rblkcipher);
 }
 
+static int __maybe_unused crypto_skcipher_report_stat(
+       struct sk_buff *skb, struct crypto_alg *alg)
+{
+       struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
+       struct crypto_istat_cipher *istat;
+       struct crypto_stat_cipher rcipher;
+
+       istat = skcipher_get_stat(skcipher);
+
+       memset(&rcipher, 0, sizeof(rcipher));
+
+       strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+       rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+       rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+       rcipher.stat_decrypt_cnt =  atomic64_read(&istat->decrypt_cnt);
+       rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+       rcipher.stat_err_cnt =  atomic64_read(&istat->err_cnt);
+
+       return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
+}
+
 static const struct crypto_type crypto_skcipher_type = {
        .extsize = crypto_skcipher_extsize,
        .init_tfm = crypto_skcipher_init_tfm,
@@ -814,6 +877,9 @@ static const struct crypto_type crypto_skcipher_type = {
 #endif
 #if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_skcipher_report,
+#endif
+#ifdef CONFIG_CRYPTO_STATS
+       .report_stat = crypto_skcipher_report_stat,
 #endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
@@ -869,6 +935,7 @@ EXPORT_SYMBOL_GPL(crypto_has_skcipher);
 
 int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
 {
+       struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
        struct crypto_alg *base = &alg->base;
 
        if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
@@ -881,6 +948,9 @@ int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
 
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               memset(istat, 0, sizeof(*istat));
+
        return 0;
 }
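
crypto_skcipher_errstat() deliberately skips -EINPROGRESS and -EBUSY: for asynchronous transforms those codes mean the request was queued, not that it failed, so only genuine failures land in err_cnt. A standalone sketch of that accounting convention; the example_* names are stand-ins, not kernel code.

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

struct example_istat {
	atomic_long encrypt_cnt;
	atomic_long encrypt_tlen;
	atomic_long err_cnt;
};

static int example_errstat(struct example_istat *istat, int err)
{
	/* -EINPROGRESS / -EBUSY mean "queued", so they are not errors. */
	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic_fetch_add(&istat->err_cnt, 1);
	return err;
}

static int example_encrypt(struct example_istat *istat, size_t len,
			   int backend_ret)
{
	atomic_fetch_add(&istat->encrypt_cnt, 1);
	atomic_fetch_add(&istat->encrypt_tlen, (long)len);

	/* backend_ret stands in for the alg->encrypt(req) return value */
	return example_errstat(istat, backend_ret);
}

int main(void)
{
	static struct example_istat istat;

	example_encrypt(&istat, 4096, 0);		/* success        */
	example_encrypt(&istat, 4096, -EINPROGRESS);	/* queued, no err */
	example_encrypt(&istat, 4096, -EINVAL);		/* real error     */

	printf("encrypt_cnt=%ld tlen=%ld err_cnt=%ld\n",
	       atomic_load(&istat.encrypt_cnt),
	       atomic_load(&istat.encrypt_tlen),
	       atomic_load(&istat.err_cnt));
	return 0;
}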
 
index 703651367dd87ae76a5e53670898d188d063a0cf..16c9484360dab0da2fa0bdeb731adcd63462c45e 100644 (file)
 #include <crypto/internal/skcipher.h>
 #include "internal.h"
 
+static inline struct crypto_istat_cipher *skcipher_get_stat_common(
+       struct skcipher_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
 int crypto_lskcipher_encrypt_sg(struct skcipher_request *req);
 int crypto_lskcipher_decrypt_sg(struct skcipher_request *req);
 int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm);
index d042c90e09076ff7f10e8cfd9c9672836ae58b70..574cffc90730f5f3296262375a08474dab6ec59d 100644 (file)
@@ -56,6 +56,35 @@ struct crypto_acomp {
        struct crypto_tfm base;
 };
 
+/*
+ * struct crypto_istat_compress - statistics for compress algorithm
+ * @compress_cnt:      number of compress requests
+ * @compress_tlen:     total data size handled by compress requests
+ * @decompress_cnt:    number of decompress requests
+ * @decompress_tlen:   total data size handled by decompress requests
+ * @err_cnt:           number of errors for compress requests
+ */
+struct crypto_istat_compress {
+       atomic64_t compress_cnt;
+       atomic64_t compress_tlen;
+       atomic64_t decompress_cnt;
+       atomic64_t decompress_tlen;
+       atomic64_t err_cnt;
+};
+
+#ifdef CONFIG_CRYPTO_STATS
+#define COMP_ALG_COMMON_STATS struct crypto_istat_compress stat;
+#else
+#define COMP_ALG_COMMON_STATS
+#endif
+
+#define COMP_ALG_COMMON {                      \
+       COMP_ALG_COMMON_STATS                   \
+                                               \
+       struct crypto_alg base;                 \
+}
+struct comp_alg_common COMP_ALG_COMMON;
+
 /**
  * DOC: Asynchronous Compression API
  *
@@ -103,11 +132,23 @@ static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
        return &tfm->base;
 }
 
+static inline struct comp_alg_common *__crypto_comp_alg_common(
+       struct crypto_alg *alg)
+{
+       return container_of(alg, struct comp_alg_common, base);
+}
+
 static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
 {
        return container_of(tfm, struct crypto_acomp, base);
 }
 
+static inline struct comp_alg_common *crypto_comp_alg_common(
+       struct crypto_acomp *tfm)
+{
+       return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
+}
+
 static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
 {
        return tfm->reqsize;
@@ -214,6 +255,27 @@ static inline void acomp_request_set_params(struct acomp_req *req,
                req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
 }
 
+static inline struct crypto_istat_compress *comp_get_stat(
+       struct comp_alg_common *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
+static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&comp_get_stat(alg)->err_cnt);
+
+       return err;
+}
+
 /**
  * crypto_acomp_compress() -- Invoke asynchronous compress operation
  *
@@ -225,7 +287,19 @@ static inline void acomp_request_set_params(struct acomp_req *req,
  */
 static inline int crypto_acomp_compress(struct acomp_req *req)
 {
-       return crypto_acomp_reqtfm(req)->compress(req);
+       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+       struct comp_alg_common *alg;
+
+       alg = crypto_comp_alg_common(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_compress *istat = comp_get_stat(alg);
+
+               atomic64_inc(&istat->compress_cnt);
+               atomic64_add(req->slen, &istat->compress_tlen);
+       }
+
+       return crypto_comp_errstat(alg, tfm->compress(req));
 }
 
 /**
@@ -239,7 +313,19 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
  */
 static inline int crypto_acomp_decompress(struct acomp_req *req)
 {
-       return crypto_acomp_reqtfm(req)->decompress(req);
+       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+       struct comp_alg_common *alg;
+
+       alg = crypto_comp_alg_common(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_compress *istat = comp_get_stat(alg);
+
+               atomic64_inc(&istat->decompress_cnt);
+               atomic64_add(req->slen, &istat->decompress_tlen);
+       }
+
+       return crypto_comp_errstat(alg, tfm->decompress(req));
 }
 
 #endif
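
COMP_ALG_COMMON is the same trick already used by HASH_ALG_COMMON and SKCIPHER_ALG_COMMON: the macro expands to a struct body, so comp_alg_common can be declared from it and then embedded anonymously (via a union) in acomp_alg and scomp_alg, letting container_of() recover the common part from a crypto_alg pointer. A compact standalone sketch of the layout trick; all of the names below are illustrative, not the kernel's.

#include <stddef.h>
#include <stdio.h>

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#endif

struct base_alg {
	const char *name;
};

/* Macro expands to a struct body shared by the named common struct and
 * the anonymous struct embedded in the concrete algorithm type. */
#define EXAMPLE_ALG_COMMON {			\
	long stat_cnt;				\
						\
	struct base_alg base;			\
}
struct example_alg_common EXAMPLE_ALG_COMMON;

struct example_concrete_alg {
	int (*do_op)(void);

	union {
		struct EXAMPLE_ALG_COMMON;	/* anonymous view */
		struct example_alg_common calg;	/* named view     */
	};
};

static struct example_alg_common *to_common(struct base_alg *alg)
{
	return container_of(alg, struct example_alg_common, base);
}

int main(void)
{
	struct example_concrete_alg alg = {
		.calg.base.name = "demo",
		.calg.stat_cnt = 3,
	};
	struct example_alg_common *common = to_common(&alg.calg.base);

	/* Both views alias the same storage thanks to the union. */
	printf("%s: stat_cnt=%ld (direct: %ld)\n",
	       common->base.name, common->stat_cnt, alg.stat_cnt);
	return 0;
}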
index 0e8a416386780deacd9399aa7e1df98d550b03e1..51382befbe37abcd73cbce1d5f6d0ca53b7af841 100644 (file)
@@ -101,6 +101,22 @@ struct aead_request {
        void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
+/*
+ * struct crypto_istat_aead - statistics for AEAD algorithm
+ * @encrypt_cnt:       number of encrypt requests
+ * @encrypt_tlen:      total data size handled by encrypt requests
+ * @decrypt_cnt:       number of decrypt requests
+ * @decrypt_tlen:      total data size handled by decrypt requests
+ * @err_cnt:           number of errors for AEAD requests
+ */
+struct crypto_istat_aead {
+       atomic64_t encrypt_cnt;
+       atomic64_t encrypt_tlen;
+       atomic64_t decrypt_cnt;
+       atomic64_t decrypt_tlen;
+       atomic64_t err_cnt;
+};
+
 /**
  * struct aead_alg - AEAD cipher definition
  * @maxauthsize: Set the maximum authentication tag size supported by the
@@ -119,6 +135,7 @@ struct aead_request {
  * @setkey: see struct skcipher_alg
  * @encrypt: see struct skcipher_alg
  * @decrypt: see struct skcipher_alg
+ * @stat: statistics for AEAD algorithm
  * @ivsize: see struct skcipher_alg
  * @chunksize: see struct skcipher_alg
  * @init: Initialize the cryptographic transformation object. This function
@@ -145,6 +162,10 @@ struct aead_alg {
        int (*init)(struct crypto_aead *tfm);
        void (*exit)(struct crypto_aead *tfm);
 
+#ifdef CONFIG_CRYPTO_STATS
+       struct crypto_istat_aead stat;
+#endif
+
        unsigned int ivsize;
        unsigned int maxauthsize;
        unsigned int chunksize;
index 18a10cad07aaaf0955fe00442d4d22a32d46cee1..31c111bebb68883437e8bf5360986c0e44ce8fe4 100644 (file)
@@ -54,6 +54,26 @@ struct crypto_akcipher {
        struct crypto_tfm base;
 };
 
+/*
+ * struct crypto_istat_akcipher - statistics for akcipher algorithm
+ * @encrypt_cnt:       number of encrypt requests
+ * @encrypt_tlen:      total data size handled by encrypt requests
+ * @decrypt_cnt:       number of decrypt requests
+ * @decrypt_tlen:      total data size handled by decrypt requests
+ * @verify_cnt:                number of verify operations
+ * @sign_cnt:          number of sign requests
+ * @err_cnt:           number of errors for akcipher requests
+ */
+struct crypto_istat_akcipher {
+       atomic64_t encrypt_cnt;
+       atomic64_t encrypt_tlen;
+       atomic64_t decrypt_cnt;
+       atomic64_t decrypt_tlen;
+       atomic64_t verify_cnt;
+       atomic64_t sign_cnt;
+       atomic64_t err_cnt;
+};
+
 /**
  * struct akcipher_alg - generic public key algorithm
  *
@@ -90,6 +110,7 @@ struct crypto_akcipher {
  * @exit:      Deinitialize the cryptographic transformation object. This is a
  *             counterpart to @init, used to remove various changes set in
  *             @init.
+ * @stat:      Statistics for akcipher algorithm
  *
  * @base:      Common crypto API algorithm data structure
  */
@@ -106,6 +127,10 @@ struct akcipher_alg {
        int (*init)(struct crypto_akcipher *tfm);
        void (*exit)(struct crypto_akcipher *tfm);
 
+#ifdef CONFIG_CRYPTO_STATS
+       struct crypto_istat_akcipher stat;
+#endif
+
        struct crypto_alg base;
 };
 
@@ -277,6 +302,27 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
        return alg->max_size(tfm);
 }
 
+static inline struct crypto_istat_akcipher *akcipher_get_stat(
+       struct akcipher_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
+static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&akcipher_get_stat(alg)->err_cnt);
+
+       return err;
+}
+
 /**
  * crypto_akcipher_encrypt() - Invoke public key encrypt operation
  *
@@ -290,8 +336,16 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
 static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
 
-       return crypto_akcipher_alg(tfm)->encrypt(req);
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
+
+               atomic64_inc(&istat->encrypt_cnt);
+               atomic64_add(req->src_len, &istat->encrypt_tlen);
+       }
+
+       return crypto_akcipher_errstat(alg, alg->encrypt(req));
 }
 
 /**
@@ -307,8 +361,16 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
 static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
 
-       return crypto_akcipher_alg(tfm)->decrypt(req);
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
+
+               atomic64_inc(&istat->decrypt_cnt);
+               atomic64_add(req->src_len, &istat->decrypt_tlen);
+       }
+
+       return crypto_akcipher_errstat(alg, alg->decrypt(req));
 }
 
 /**
@@ -360,8 +422,12 @@ int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
 static inline int crypto_akcipher_sign(struct akcipher_request *req)
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
 
-       return crypto_akcipher_alg(tfm)->sign(req);
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&akcipher_get_stat(alg)->sign_cnt);
+
+       return crypto_akcipher_errstat(alg, alg->sign(req));
 }
 
 /**
@@ -381,8 +447,12 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
 static inline int crypto_akcipher_verify(struct akcipher_request *req)
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+       struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&akcipher_get_stat(alg)->verify_cnt);
 
-       return crypto_akcipher_alg(tfm)->verify(req);
+       return crypto_akcipher_errstat(alg, alg->verify(req));
 }
 
 /**
index 156de41ca760acf9035bfae9e8ed90091af13a68..7a4a71af653fa84b80f888ec695c9deacc91e77b 100644 (file)
@@ -61,6 +61,9 @@ struct crypto_type {
        void (*show)(struct seq_file *m, struct crypto_alg *alg);
        int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
        void (*free)(struct crypto_instance *inst);
+#ifdef CONFIG_CRYPTO_STATS
+       int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg);
+#endif
 
        unsigned int type;
        unsigned int maskclear;
index 0014bdd81ab7d575843058006a34707a69f63883..5d61f576cfc8606c4da44f5c56101cceb554f1b5 100644 (file)
@@ -23,8 +23,27 @@ struct crypto_ahash;
  * crypto_unregister_shash().
  */
 
+/*
+ * struct crypto_istat_hash - statistics for hash algorithm
+ * @hash_cnt:          number of hash requests
+ * @hash_tlen:         total data size hashed
+ * @err_cnt:           number of errors for hash requests
+ */
+struct crypto_istat_hash {
+       atomic64_t hash_cnt;
+       atomic64_t hash_tlen;
+       atomic64_t err_cnt;
+};
+
+#ifdef CONFIG_CRYPTO_STATS
+#define HASH_ALG_COMMON_STAT struct crypto_istat_hash stat;
+#else
+#define HASH_ALG_COMMON_STAT
+#endif
+
 /*
  * struct hash_alg_common - define properties of message digest
+ * @stat: Statistics for hash algorithm.
  * @digestsize: Size of the result of the transformation. A buffer of this size
  *             must be available to the @final and @finup calls, so they can
  *             store the resulting hash into it. For various predefined sizes,
@@ -41,6 +60,8 @@ struct crypto_ahash;
  *       information.
  */
 #define HASH_ALG_COMMON {              \
+       HASH_ALG_COMMON_STAT            \
+                                       \
        unsigned int digestsize;        \
        unsigned int statesize;         \
                                        \
@@ -222,6 +243,7 @@ struct shash_alg {
        };
 };
 #undef HASH_ALG_COMMON
+#undef HASH_ALG_COMMON_STAT
 
 struct crypto_ahash {
        bool using_shash; /* Underlying algorithm is shash, not ahash */
index 475e60a9f9ea9dc06306ae5a35a7dfce7832c31a..4ac46bafba9d7c94699f8af2c0e77bbe12a19bd2 100644 (file)
@@ -31,7 +31,9 @@
  *             @init.
  *
  * @reqsize:   Context size for (de)compression requests
+ * @stat:      Statistics for compress algorithm
  * @base:      Common crypto API algorithm data structure
+ * @calg:      Common algorithm data structure shared with scomp
  */
 struct acomp_alg {
        int (*compress)(struct acomp_req *req);
@@ -42,7 +44,10 @@ struct acomp_alg {
 
        unsigned int reqsize;
 
-       struct crypto_alg base;
+       union {
+               struct COMP_ALG_COMMON;
+               struct comp_alg_common calg;
+       };
 };
 
 /*
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
new file mode 100644 (file)
index 0000000..fd54074
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
+
+#ifdef CONFIG_CRYPTO_STATS
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
+#else
+static inline int crypto_reportstat(struct sk_buff *in_skb,
+                                   struct nlmsghdr *in_nlh,
+                                   struct nlattr **attrs)
+{
+       return -ENOTSUPP;
+}
+#endif
index 5a75f2db18cefe6f43ccc6daa38b2b9b42874a1d..858fe3965ae347ef19d498bd91997e54766e302e 100644 (file)
@@ -27,7 +27,9 @@ struct crypto_scomp {
  * @free_ctx:  Function frees context allocated with alloc_ctx
  * @compress:  Function performs a compress operation
  * @decompress:        Function performs a de-compress operation
+ * @stat:      Statistics for compress algorithm
  * @base:      Common crypto API algorithm data structure
+ * @calg:      Common algorithm data structure shared with acomp
  */
 struct scomp_alg {
        void *(*alloc_ctx)(struct crypto_scomp *tfm);
@@ -38,7 +40,11 @@ struct scomp_alg {
        int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
                          unsigned int slen, u8 *dst, unsigned int *dlen,
                          void *ctx);
-       struct crypto_alg base;
+
+       union {
+               struct COMP_ALG_COMMON;
+               struct comp_alg_common calg;
+       };
 };
 
 static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
index 2d9c4de57b69489434258aad71246af35f8a4a2e..1988e24a0d1db6411573f64cd8499181b96cfcda 100644 (file)
@@ -51,6 +51,20 @@ struct crypto_kpp {
        struct crypto_tfm base;
 };
 
+/*
+ * struct crypto_istat_kpp - statistics for KPP algorithm
+ * @setsecret_cnt:             number of setsecret operations
+ * @generate_public_key_cnt:   number of generate_public_key operations
+ * @compute_shared_secret_cnt: number of compute_shared_secret operations
+ * @err_cnt:                   number of errors for KPP requests
+ */
+struct crypto_istat_kpp {
+       atomic64_t setsecret_cnt;
+       atomic64_t generate_public_key_cnt;
+       atomic64_t compute_shared_secret_cnt;
+       atomic64_t err_cnt;
+};
+
 /**
  * struct kpp_alg - generic key-agreement protocol primitives
  *
@@ -73,6 +87,7 @@ struct crypto_kpp {
  * @exit:              Undo everything @init did.
  *
  * @base:              Common crypto API algorithm data structure
+ * @stat:              Statistics for KPP algorithm
  */
 struct kpp_alg {
        int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
@@ -85,6 +100,10 @@ struct kpp_alg {
        int (*init)(struct crypto_kpp *tfm);
        void (*exit)(struct crypto_kpp *tfm);
 
+#ifdef CONFIG_CRYPTO_STATS
+       struct crypto_istat_kpp stat;
+#endif
+
        struct crypto_alg base;
 };
 
@@ -272,6 +291,26 @@ struct kpp_secret {
        unsigned short len;
 };
 
+static inline struct crypto_istat_kpp *kpp_get_stat(struct kpp_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
+static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&kpp_get_stat(alg)->err_cnt);
+
+       return err;
+}
+
 /**
  * crypto_kpp_set_secret() - Invoke kpp operation
  *
@@ -290,7 +329,12 @@ struct kpp_secret {
 static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
                                        const void *buffer, unsigned int len)
 {
-       return crypto_kpp_alg(tfm)->set_secret(tfm, buffer, len);
+       struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&kpp_get_stat(alg)->setsecret_cnt);
+
+       return crypto_kpp_errstat(alg, alg->set_secret(tfm, buffer, len));
 }
 
 /**
@@ -309,8 +353,12 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
 static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
 {
        struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+       struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&kpp_get_stat(alg)->generate_public_key_cnt);
 
-       return crypto_kpp_alg(tfm)->generate_public_key(req);
+       return crypto_kpp_errstat(alg, alg->generate_public_key(req));
 }
 
 /**
@@ -326,8 +374,12 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
 static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
 {
        struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+       struct kpp_alg *alg = crypto_kpp_alg(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+               atomic64_inc(&kpp_get_stat(alg)->compute_shared_secret_cnt);
 
-       return crypto_kpp_alg(tfm)->compute_shared_secret(req);
+       return crypto_kpp_errstat(alg, alg->compute_shared_secret(req));
 }
 
 /**
index 5ac4388f50e13715a68f78e70a2398a3200920f8..6abe5102e5fb1004a928c554db692ff7d93b92a9 100644 (file)
 
 struct crypto_rng;
 
+/*
+ * struct crypto_istat_rng - statistics for RNG algorithm
+ * @generate_cnt:      number of RNG generate requests
+ * @generate_tlen:     total data size of generated data by the RNG
+ * @seed_cnt:          number of times the RNG was seeded
+ * @err_cnt:           number of errors for RNG requests
+ */
+struct crypto_istat_rng {
+       atomic64_t generate_cnt;
+       atomic64_t generate_tlen;
+       atomic64_t seed_cnt;
+       atomic64_t err_cnt;
+};
+
 /**
  * struct rng_alg - random number generator definition
  *
@@ -32,6 +46,7 @@ struct crypto_rng;
  *             size of the seed is defined with @seedsize .
  * @set_ent:   Set entropy that would otherwise be obtained from
  *             entropy source.  Internal use only.
+ * @stat:      Statistics for rng algorithm
  * @seedsize:  The seed size required for a random number generator
  *             initialization defined with this variable. Some
  *             random number generators does not require a seed
@@ -48,6 +63,10 @@ struct rng_alg {
        void (*set_ent)(struct crypto_rng *tfm, const u8 *data,
                        unsigned int len);
 
+#ifdef CONFIG_CRYPTO_STATS
+       struct crypto_istat_rng stat;
+#endif
+
        unsigned int seedsize;
 
        struct crypto_alg base;
@@ -125,6 +144,26 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
        crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
 }
 
+static inline struct crypto_istat_rng *rng_get_stat(struct rng_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+       return &alg->stat;
+#else
+       return NULL;
+#endif
+}
+
+static inline int crypto_rng_errstat(struct rng_alg *alg, int err)
+{
+       if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+               return err;
+
+       if (err && err != -EINPROGRESS && err != -EBUSY)
+               atomic64_inc(&rng_get_stat(alg)->err_cnt);
+
+       return err;
+}
+
 /**
  * crypto_rng_generate() - get random number
  * @tfm: cipher handle
@@ -143,7 +182,17 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
                                      const u8 *src, unsigned int slen,
                                      u8 *dst, unsigned int dlen)
 {
-       return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+       struct rng_alg *alg = crypto_rng_alg(tfm);
+
+       if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+               struct crypto_istat_rng *istat = rng_get_stat(alg);
+
+               atomic64_inc(&istat->generate_cnt);
+               atomic64_add(dlen, &istat->generate_tlen);
+       }
+
+       return crypto_rng_errstat(alg,
+                                 alg->generate(tfm, src, slen, dst, dlen));
 }
 
 /**
index 74d47e23374e646d79b54fefcb294c40bc8fe607..c8857d7bdb37f1150b9060599482d5a8892c045a 100644 (file)
@@ -64,6 +64,28 @@ struct crypto_lskcipher {
        struct crypto_tfm base;
 };
 
+/*
+ * struct crypto_istat_cipher - statistics for cipher algorithm
+ * @encrypt_cnt:       number of encrypt requests
+ * @encrypt_tlen:      total data size handled by encrypt requests
+ * @decrypt_cnt:       number of decrypt requests
+ * @decrypt_tlen:      total data size handled by decrypt requests
+ * @err_cnt:           number of errors for cipher requests
+ */
+struct crypto_istat_cipher {
+       atomic64_t encrypt_cnt;
+       atomic64_t encrypt_tlen;
+       atomic64_t decrypt_cnt;
+       atomic64_t decrypt_tlen;
+       atomic64_t err_cnt;
+};
+
+#ifdef CONFIG_CRYPTO_STATS
+#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat;
+#else
+#define SKCIPHER_ALG_COMMON_STAT
+#endif
+
 /*
  * struct skcipher_alg_common - common properties of skcipher_alg
  * @min_keysize: Minimum key size supported by the transformation. This is the
@@ -81,6 +103,7 @@ struct crypto_lskcipher {
  * @chunksize: Equal to the block size except for stream ciphers such as
  *            CTR where it is set to the underlying block size.
  * @statesize: Size of the internal state for the algorithm.
+ * @stat: Statistics for cipher algorithm
  * @base: Definition of a generic crypto algorithm.
  */
 #define SKCIPHER_ALG_COMMON {          \
@@ -90,6 +113,8 @@ struct crypto_lskcipher {
        unsigned int chunksize;         \
        unsigned int statesize;         \
                                        \
+       SKCIPHER_ALG_COMMON_STAT        \
+                                       \
        struct crypto_alg base;         \
 }
 struct skcipher_alg_common SKCIPHER_ALG_COMMON;
index e163670d60f7d6cf41ab369b8ab10cadd4d32717..5730c67f0617c5f3f8b7b4a0e4c11a2ab00d7dd0 100644 (file)
@@ -54,16 +54,16 @@ enum crypto_attr_type_t {
        CRYPTOCFGA_REPORT_AKCIPHER,     /* struct crypto_report_akcipher */
        CRYPTOCFGA_REPORT_KPP,          /* struct crypto_report_kpp */
        CRYPTOCFGA_REPORT_ACOMP,        /* struct crypto_report_acomp */
-       CRYPTOCFGA_STAT_LARVAL,         /* No longer supported */
-       CRYPTOCFGA_STAT_HASH,           /* No longer supported */
-       CRYPTOCFGA_STAT_BLKCIPHER,      /* No longer supported */
-       CRYPTOCFGA_STAT_AEAD,           /* No longer supported */
-       CRYPTOCFGA_STAT_COMPRESS,       /* No longer supported */
-       CRYPTOCFGA_STAT_RNG,            /* No longer supported */
-       CRYPTOCFGA_STAT_CIPHER,         /* No longer supported */
-       CRYPTOCFGA_STAT_AKCIPHER,       /* No longer supported */
-       CRYPTOCFGA_STAT_KPP,            /* No longer supported */
-       CRYPTOCFGA_STAT_ACOMP,          /* No longer supported */
+       CRYPTOCFGA_STAT_LARVAL,         /* struct crypto_stat */
+       CRYPTOCFGA_STAT_HASH,           /* struct crypto_stat */
+       CRYPTOCFGA_STAT_BLKCIPHER,      /* struct crypto_stat */
+       CRYPTOCFGA_STAT_AEAD,           /* struct crypto_stat */
+       CRYPTOCFGA_STAT_COMPRESS,       /* struct crypto_stat */
+       CRYPTOCFGA_STAT_RNG,            /* struct crypto_stat */
+       CRYPTOCFGA_STAT_CIPHER,         /* struct crypto_stat */
+       CRYPTOCFGA_STAT_AKCIPHER,       /* struct crypto_stat */
+       CRYPTOCFGA_STAT_KPP,            /* struct crypto_stat */
+       CRYPTOCFGA_STAT_ACOMP,          /* struct crypto_stat */
        __CRYPTOCFGA_MAX
 
 #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -79,7 +79,6 @@ struct crypto_user_alg {
        __u32 cru_flags;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_aead {
        char type[CRYPTO_MAX_NAME];
        __u64 stat_encrypt_cnt;
@@ -89,7 +88,6 @@ struct crypto_stat_aead {
        __u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_akcipher {
        char type[CRYPTO_MAX_NAME];
        __u64 stat_encrypt_cnt;
@@ -101,7 +99,6 @@ struct crypto_stat_akcipher {
        __u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_cipher {
        char type[CRYPTO_MAX_NAME];
        __u64 stat_encrypt_cnt;
@@ -111,7 +108,6 @@ struct crypto_stat_cipher {
        __u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_compress {
        char type[CRYPTO_MAX_NAME];
        __u64 stat_compress_cnt;
@@ -121,7 +117,6 @@ struct crypto_stat_compress {
        __u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_hash {
        char type[CRYPTO_MAX_NAME];
        __u64 stat_hash_cnt;
@@ -129,7 +124,6 @@ struct crypto_stat_hash {
        __u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_kpp {
        char type[CRYPTO_MAX_NAME];
        __u64 stat_setsecret_cnt;
@@ -138,7 +132,6 @@ struct crypto_stat_kpp {
        __u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_rng {
        char type[CRYPTO_MAX_NAME];
        __u64 stat_generate_cnt;
@@ -147,7 +140,6 @@ struct crypto_stat_rng {
        __u64 stat_err_cnt;
 };
 
-/* No longer supported, do not use. */
 struct crypto_stat_larval {
        char type[CRYPTO_MAX_NAME];
 };
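
The reply to CRYPTO_MSG_GETSTAT, as built by crypto_reportstat_alg() above, is the crypto_user_alg header followed by one of the CRYPTOCFGA_STAT_* attributes. A sketch of pulling struct crypto_stat_hash out of such a reply; it assumes the buffer received by the request sketch shown earlier, and error handling is minimal.

#include <stdio.h>
#include <string.h>
#include <linux/netlink.h>
#include <linux/cryptouser.h>

/* Walk the attributes that follow the crypto_user_alg payload of a
 * CRYPTO_MSG_GETSTAT reply and print the hash statistics, if present. */
static void print_hash_stat(const struct nlmsghdr *nlh)
{
	int attrlen = (int)nlh->nlmsg_len -
		      (int)NLMSG_SPACE(sizeof(struct crypto_user_alg));
	const struct nlattr *nla = (const struct nlattr *)
		((const char *)NLMSG_DATA(nlh) +
		 NLMSG_ALIGN(sizeof(struct crypto_user_alg)));

	if (nlh->nlmsg_type != CRYPTO_MSG_GETSTAT)
		return;

	while (attrlen >= NLA_HDRLEN && nla->nla_len >= NLA_HDRLEN &&
	       (int)nla->nla_len <= attrlen) {
		if (nla->nla_type == CRYPTOCFGA_STAT_HASH &&
		    (size_t)nla->nla_len >=
		    NLA_HDRLEN + sizeof(struct crypto_stat_hash)) {
			struct crypto_stat_hash rhash;

			memcpy(&rhash, (const char *)nla + NLA_HDRLEN,
			       sizeof(rhash));
			printf("%s: hash_cnt=%llu hash_tlen=%llu err_cnt=%llu\n",
			       rhash.type,
			       (unsigned long long)rhash.stat_hash_cnt,
			       (unsigned long long)rhash.stat_hash_tlen,
			       (unsigned long long)rhash.stat_err_cnt);
			return;
		}
		attrlen -= NLA_ALIGN(nla->nla_len);
		nla = (const struct nlattr *)
			((const char *)nla + NLA_ALIGN(nla->nla_len));
	}
}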