int crypto_ahash_final(struct ahash_request *req)
 {
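+       /*
+        * Snapshot the algorithm and byte count before the operation: an
+        * asynchronous request may already have been freed by the time the
+        * return code is inspected, so the statistics code must not touch
+        * @req afterwards.
+        */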
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct crypto_alg *alg = tfm->base.__crt_alg;
+       unsigned int nbytes = req->nbytes;
        int ret;
 
+       crypto_stats_get(alg);
        ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
-       crypto_stat_ahash_final(req, ret);
+       crypto_stats_ahash_final(nbytes, ret, alg);
        return ret;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
 int crypto_ahash_finup(struct ahash_request *req)
 {
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct crypto_alg *alg = tfm->base.__crt_alg;
+       unsigned int nbytes = req->nbytes;
        int ret;
 
+       crypto_stats_get(alg);
        ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
-       crypto_stat_ahash_final(req, ret);
+       crypto_stats_ahash_final(nbytes, ret, alg);
        return ret;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
 int crypto_ahash_digest(struct ahash_request *req)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct crypto_alg *alg = tfm->base.__crt_alg;
+       unsigned int nbytes = req->nbytes;
        int ret;
 
+       crypto_stats_get(alg);
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else
                ret = crypto_ahash_op(req, tfm->digest);
-       crypto_stat_ahash_final(req, ret);
+       crypto_stats_ahash_final(nbytes, ret, alg);
        return ret;
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
 }
 EXPORT_SYMBOL_GPL(crypto_type_has_alg);
 
+#ifdef CONFIG_CRYPTO_STATS
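+/*
+ * Out-of-line statistics helpers.  A caller pins the algorithm with
+ * crypto_stats_get() before starting an operation; the matching
+ * crypto_stats_*() accounting helper records the outcome and drops that
+ * reference again via crypto_alg_put(), whether the operation succeeded
+ * or not.
+ */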
+void crypto_stats_get(struct crypto_alg *alg)
+{
+       crypto_alg_get(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_get);
+
+void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret,
+                                    struct crypto_alg *alg)
+{
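+       /*
+        * -EINPROGRESS and -EBUSY mean the request was queued for
+        * asynchronous processing, not that it failed, so count it as
+        * submitted work.
+        */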
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->cipher_err_cnt);
+       } else {
+               atomic64_inc(&alg->encrypt_cnt);
+               atomic64_add(nbytes, &alg->encrypt_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_encrypt);
+
+void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret,
+                                    struct crypto_alg *alg)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->cipher_err_cnt);
+       } else {
+               atomic64_inc(&alg->decrypt_cnt);
+               atomic64_add(nbytes, &alg->decrypt_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_decrypt);
+
+void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
+                              int ret)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->aead_err_cnt);
+       } else {
+               atomic64_inc(&alg->encrypt_cnt);
+               atomic64_add(cryptlen, &alg->encrypt_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt);
+
+void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg,
+                              int ret)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->aead_err_cnt);
+       } else {
+               atomic64_inc(&alg->decrypt_cnt);
+               atomic64_add(cryptlen, &alg->decrypt_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt);
+
+void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret,
+                                  struct crypto_alg *alg)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->akcipher_err_cnt);
+       } else {
+               atomic64_inc(&alg->encrypt_cnt);
+               atomic64_add(src_len, &alg->encrypt_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt);
+
+void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret,
+                                  struct crypto_alg *alg)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->akcipher_err_cnt);
+       } else {
+               atomic64_inc(&alg->decrypt_cnt);
+               atomic64_add(src_len, &alg->decrypt_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt);
+
+void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+               atomic64_inc(&alg->akcipher_err_cnt);
+       else
+               atomic64_inc(&alg->sign_cnt);
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign);
+
+void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+               atomic64_inc(&alg->akcipher_err_cnt);
+       else
+               atomic64_inc(&alg->verify_cnt);
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify);
+
+void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->compress_err_cnt);
+       } else {
+               atomic64_inc(&alg->compress_cnt);
+               atomic64_add(slen, &alg->compress_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_compress);
+
+void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->compress_err_cnt);
+       } else {
+               atomic64_inc(&alg->decompress_cnt);
+               atomic64_add(slen, &alg->decompress_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_decompress);
+
+void crypto_stats_ahash_update(unsigned int nbytes, int ret,
+                              struct crypto_alg *alg)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+               atomic64_inc(&alg->hash_err_cnt);
+       else
+               atomic64_add(nbytes, &alg->hash_tlen);
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ahash_update);
+
+void crypto_stats_ahash_final(unsigned int nbytes, int ret,
+                             struct crypto_alg *alg)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->hash_err_cnt);
+       } else {
+               atomic64_inc(&alg->hash_cnt);
+               atomic64_add(nbytes, &alg->hash_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ahash_final);
+
+void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
+{
+       if (ret)
+               atomic64_inc(&alg->kpp_err_cnt);
+       else
+               atomic64_inc(&alg->setsecret_cnt);
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret);
+
+void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
+{
+       if (ret)
+               atomic64_inc(&alg->kpp_err_cnt);
+       else
+               atomic64_inc(&alg->generate_public_key_cnt);
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key);
+
+void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
+{
+       if (ret)
+               atomic64_inc(&alg->kpp_err_cnt);
+       else
+               atomic64_inc(&alg->compute_shared_secret_cnt);
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret);
+
+void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+               atomic64_inc(&alg->rng_err_cnt);
+       else
+               atomic64_inc(&alg->seed_cnt);
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_rng_seed);
+
+void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen,
+                              int ret)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->rng_err_cnt);
+       } else {
+               atomic64_inc(&alg->generate_cnt);
+               atomic64_add(dlen, &alg->generate_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_rng_generate);
+
+void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret,
+                                  struct crypto_alg *alg)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->cipher_err_cnt);
+       } else {
+               atomic64_inc(&alg->encrypt_cnt);
+               atomic64_add(cryptlen, &alg->encrypt_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_skcipher_encrypt);
+
+void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret,
+                                  struct crypto_alg *alg)
+{
+       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+               atomic64_inc(&alg->cipher_err_cnt);
+       } else {
+               atomic64_inc(&alg->decrypt_cnt);
+               atomic64_add(cryptlen, &alg->decrypt_tlen);
+       }
+       crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt);
+#endif
+
 static int __init crypto_algapi_init(void)
 {
        crypto_init_proc();
 
 
 int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
 {
+       struct crypto_alg *alg = tfm->base.__crt_alg;
        u8 *buf = NULL;
        int err;
 
        if (!seed && slen) {
                buf = kmalloc(slen, GFP_KERNEL);
                if (!buf)
        }
 
+       /* Take the stats reference only after all early-return paths. */
+       crypto_stats_get(alg);
        err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
-       crypto_stat_rng_seed(tfm, err);
+       crypto_stats_rng_seed(alg, err);
 out:
        kzfree(buf);
        return err;
 
                req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
 }
 
-static inline void crypto_stat_compress(struct acomp_req *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&tfm->base.__crt_alg->compress_err_cnt);
-       } else {
-               atomic64_inc(&tfm->base.__crt_alg->compress_cnt);
-               atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen);
-       }
-#endif
-}
-
-static inline void crypto_stat_decompress(struct acomp_req *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&tfm->base.__crt_alg->compress_err_cnt);
-       } else {
-               atomic64_inc(&tfm->base.__crt_alg->decompress_cnt);
-               atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen);
-       }
-#endif
-}
-
 /**
  * crypto_acomp_compress() -- Invoke asynchronous compress operation
  *
 static inline int crypto_acomp_compress(struct acomp_req *req)
 {
        struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+       struct crypto_alg *alg = tfm->base.__crt_alg;
+       unsigned int slen = req->slen;
        int ret;
 
+       crypto_stats_get(alg);
        ret = tfm->compress(req);
-       crypto_stat_compress(req, ret);
+       crypto_stats_compress(slen, ret, alg);
        return ret;
 }
 
 static inline int crypto_acomp_decompress(struct acomp_req *req)
 {
        struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+       struct crypto_alg *alg = tfm->base.__crt_alg;
+       unsigned int slen = req->slen;
        int ret;
 
+       crypto_stats_get(alg);
        ret = tfm->decompress(req);
-       crypto_stat_decompress(req, ret);
+       crypto_stats_decompress(slen, ret, alg);
        return ret;
 }
 
 
        return __crypto_aead_cast(req->base.tfm);
 }
 
-static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&tfm->base.__crt_alg->aead_err_cnt);
-       } else {
-               atomic64_inc(&tfm->base.__crt_alg->encrypt_cnt);
-               atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen);
-       }
-#endif
-}
-
-static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&tfm->base.__crt_alg->aead_err_cnt);
-       } else {
-               atomic64_inc(&tfm->base.__crt_alg->decrypt_cnt);
-               atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen);
-       }
-#endif
-}
-
 /**
  * crypto_aead_encrypt() - encrypt plaintext
  * @req: reference to the aead_request handle that holds all information
 static inline int crypto_aead_encrypt(struct aead_request *req)
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
+       struct crypto_alg *alg = aead->base.__crt_alg;
+       unsigned int cryptlen = req->cryptlen;
        int ret;
 
+       crypto_stats_get(alg);
        if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else
                ret = crypto_aead_alg(aead)->encrypt(req);
-       crypto_stat_aead_encrypt(req, ret);
+       crypto_stats_aead_encrypt(cryptlen, alg, ret);
        return ret;
 }
 
 static inline int crypto_aead_decrypt(struct aead_request *req)
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
+       struct crypto_alg *alg = aead->base.__crt_alg;
+       unsigned int cryptlen = req->cryptlen;
        int ret;
 
+       crypto_stats_get(alg);
        if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else if (req->cryptlen < crypto_aead_authsize(aead))
                ret = -EINVAL;
        else
                ret = crypto_aead_alg(aead)->decrypt(req);
-       crypto_stat_aead_decrypt(req, ret);
+       crypto_stats_aead_decrypt(cryptlen, alg, ret);
        return ret;
 }
 
 
        return alg->max_size(tfm);
 }
 
-static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req,
-                                               int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
-       } else {
-               atomic64_inc(&tfm->base.__crt_alg->encrypt_cnt);
-               atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen);
-       }
-#endif
-}
-
-static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req,
-                                               int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
-       } else {
-               atomic64_inc(&tfm->base.__crt_alg->decrypt_cnt);
-               atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen);
-       }
-#endif
-}
-
-static inline void crypto_stat_akcipher_sign(struct akcipher_request *req,
-                                            int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
-               atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
-       else
-               atomic64_inc(&tfm->base.__crt_alg->sign_cnt);
-#endif
-}
-
-static inline void crypto_stat_akcipher_verify(struct akcipher_request *req,
-                                              int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
-               atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
-       else
-               atomic64_inc(&tfm->base.__crt_alg->verify_cnt);
-#endif
-}
-
 /**
  * crypto_akcipher_encrypt() - Invoke public key encrypt operation
  *
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+       struct crypto_alg *calg = tfm->base.__crt_alg;
+       unsigned int src_len = req->src_len;
        int ret;
 
+       crypto_stats_get(calg);
        ret = alg->encrypt(req);
-       crypto_stat_akcipher_encrypt(req, ret);
+       crypto_stats_akcipher_encrypt(src_len, ret, calg);
        return ret;
 }
 
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+       struct crypto_alg *calg = tfm->base.__crt_alg;
+       unsigned int src_len = req->src_len;
        int ret;
 
+       crypto_stats_get(calg);
        ret = alg->decrypt(req);
-       crypto_stat_akcipher_decrypt(req, ret);
+       crypto_stats_akcipher_decrypt(src_len, ret, calg);
        return ret;
 }
 
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+       struct crypto_alg *calg = tfm->base.__crt_alg;
        int ret;
 
+       crypto_stats_get(calg);
        ret = alg->sign(req);
-       crypto_stat_akcipher_sign(req, ret);
+       crypto_stats_akcipher_sign(ret, calg);
        return ret;
 }
 
 {
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+       struct crypto_alg *calg = tfm->base.__crt_alg;
        int ret;
 
+       crypto_stats_get(calg);
        ret = alg->verify(req);
-       crypto_stat_akcipher_verify(req, ret);
+       crypto_stats_akcipher_verify(ret, calg);
        return ret;
 }
 
 
 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen);
 
-static inline void crypto_stat_ahash_update(struct ahash_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
-               atomic64_inc(&tfm->base.__crt_alg->hash_err_cnt);
-       else
-               atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
-#endif
-}
-
-static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&tfm->base.__crt_alg->hash_err_cnt);
-       } else {
-               atomic64_inc(&tfm->base.__crt_alg->hash_cnt);
-               atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
-       }
-#endif
-}
-
 /**
  * crypto_ahash_finup() - update and finalize message digest
  * @req: reference to the ahash_request handle that holds all information
  */
 static inline int crypto_ahash_update(struct ahash_request *req)
 {
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct crypto_alg *alg = tfm->base.__crt_alg;
+       unsigned int nbytes = req->nbytes;
        int ret;
 
+       crypto_stats_get(alg);
        ret = crypto_ahash_reqtfm(req)->update(req);
-       crypto_stat_ahash_update(req, ret);
+       crypto_stats_ahash_update(nbytes, ret, alg);
        return ret;
 }
 
 
        unsigned short len;
 };
 
-static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       if (ret)
-               atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt);
-       else
-               atomic64_inc(&tfm->base.__crt_alg->setsecret_cnt);
-#endif
-}
-
-static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req,
-                                                      int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-
-       if (ret)
-               atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt);
-       else
-               atomic64_inc(&tfm->base.__crt_alg->generate_public_key_cnt);
-#endif
-}
-
-static inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req,
-                                                        int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-
-       if (ret)
-               atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt);
-       else
-               atomic64_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt);
-#endif
-}
-
 /**
  * crypto_kpp_set_secret() - Invoke kpp operation
  *
                                        const void *buffer, unsigned int len)
 {
        struct kpp_alg *alg = crypto_kpp_alg(tfm);
+       struct crypto_alg *calg = tfm->base.__crt_alg;
        int ret;
 
+       crypto_stats_get(calg);
        ret = alg->set_secret(tfm, buffer, len);
-       crypto_stat_kpp_set_secret(tfm, ret);
+       crypto_stats_kpp_set_secret(calg, ret);
        return ret;
 }
 
 {
        struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
        struct kpp_alg *alg = crypto_kpp_alg(tfm);
+       struct crypto_alg *calg = tfm->base.__crt_alg;
        int ret;
 
+       crypto_stats_get(calg);
        ret = alg->generate_public_key(req);
-       crypto_stat_kpp_generate_public_key(req, ret);
+       crypto_stats_kpp_generate_public_key(calg, ret);
        return ret;
 }
 
 {
        struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
        struct kpp_alg *alg = crypto_kpp_alg(tfm);
+       struct crypto_alg *calg = tfm->base.__crt_alg;
        int ret;
 
+       crypto_stats_get(calg);
        ret = alg->compute_shared_secret(req);
-       crypto_stat_kpp_compute_shared_secret(req, ret);
+       crypto_stats_kpp_compute_shared_secret(calg, ret);
        return ret;
 }
 
 
        crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
 }
 
-static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY)
-               atomic64_inc(&tfm->base.__crt_alg->rng_err_cnt);
-       else
-               atomic64_inc(&tfm->base.__crt_alg->seed_cnt);
-#endif
-}
-
-static inline void crypto_stat_rng_generate(struct crypto_rng *tfm,
-                                           unsigned int dlen, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&tfm->base.__crt_alg->rng_err_cnt);
-       } else {
-               atomic64_inc(&tfm->base.__crt_alg->generate_cnt);
-               atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen);
-       }
-#endif
-}
-
 /**
  * crypto_rng_generate() - get random number
  * @tfm: cipher handle
                                      const u8 *src, unsigned int slen,
                                      u8 *dst, unsigned int dlen)
 {
+       struct crypto_alg *alg = tfm->base.__crt_alg;
        int ret;
 
+       crypto_stats_get(alg);
        ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
-       crypto_stat_rng_generate(tfm, dlen, ret);
+       crypto_stats_rng_generate(alg, dlen, ret);
        return ret;
 }
 
 
        return container_of(tfm, struct crypto_sync_skcipher, base);
 }
 
-static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req,
-                                               int ret, struct crypto_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->cipher_err_cnt);
-       } else {
-               atomic64_inc(&alg->encrypt_cnt);
-               atomic64_add(req->cryptlen, &alg->encrypt_tlen);
-       }
-#endif
-}
-
-static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
-                                               int ret, struct crypto_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&alg->cipher_err_cnt);
-       } else {
-               atomic64_inc(&alg->decrypt_cnt);
-               atomic64_add(req->cryptlen, &alg->decrypt_tlen);
-       }
-#endif
-}
-
 /**
  * crypto_skcipher_encrypt() - encrypt plaintext
  * @req: reference to the skcipher_request handle that holds all information
 static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct crypto_alg *alg = tfm->base.__crt_alg;
+       unsigned int cryptlen = req->cryptlen;
        int ret;
 
+       crypto_stats_get(alg);
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else
                ret = tfm->encrypt(req);
-       crypto_stat_skcipher_encrypt(req, ret, tfm->base.__crt_alg);
+       crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
        return ret;
 }
 
 static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct crypto_alg *alg = tfm->base.__crt_alg;
+       unsigned int cryptlen = req->cryptlen;
        int ret;
 
+       crypto_stats_get(alg);
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else
                ret = tfm->decrypt(req);
-       crypto_stat_skcipher_decrypt(req, ret, tfm->base.__crt_alg);
+       crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
        return ret;
 }
 
 
 
 } CRYPTO_MINALIGN_ATTR;
 
+#ifdef CONFIG_CRYPTO_STATS
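+/*
+ * Algorithm statistics API: call crypto_stats_get() to take a reference on
+ * @alg before starting an operation, then exactly one of the accounting
+ * helpers below once the result is known; each of them drops the reference.
+ * When CONFIG_CRYPTO_STATS is disabled the static inline stubs keep callers
+ * unchanged at no cost.
+ */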
+void crypto_stats_get(struct crypto_alg *alg);
+void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
+void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
+void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
+void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
+void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
+void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
+void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
+void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
+void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
+void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
+void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
+void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
+#else
+static inline void crypto_stats_get(struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
+{}
+static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
+{}
+#endif
 /*
  * A helper struct for waiting for completion of async crypto ops
  */
        return __crypto_ablkcipher_cast(req->base.tfm);
 }
 
-static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req,
-                                                 int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct ablkcipher_tfm *crt =
-               crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
-       } else {
-               atomic64_inc(&crt->base->base.__crt_alg->encrypt_cnt);
-               atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen);
-       }
-#endif
-}
-
-static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req,
-                                                 int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
-       struct ablkcipher_tfm *crt =
-               crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
-
-       if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-               atomic64_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
-       } else {
-               atomic64_inc(&crt->base->base.__crt_alg->decrypt_cnt);
-               atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen);
-       }
-#endif
-}
-
 /**
  * crypto_ablkcipher_encrypt() - encrypt plaintext
  * @req: reference to the ablkcipher_request handle that holds all information
 {
        struct ablkcipher_tfm *crt =
                crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+       struct crypto_alg *alg = crt->base->base.__crt_alg;
+       unsigned int nbytes = req->nbytes;
        int ret;
 
+       crypto_stats_get(alg);
        ret = crt->encrypt(req);
-       crypto_stat_ablkcipher_encrypt(req, ret);
+       crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
        return ret;
 }
 
 {
        struct ablkcipher_tfm *crt =
                crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+       struct crypto_alg *alg = crt->base->base.__crt_alg;
+       unsigned int nbytes = req->nbytes;
        int ret;
 
+       crypto_stats_get(alg);
        ret = crt->decrypt(req);
-       crypto_stat_ablkcipher_decrypt(req, ret);
+       crypto_stats_ablkcipher_decrypt(nbytes, ret, alg);
        return ret;
 }