#include "fsverity_private.h"
 
+#include <crypto/hash.h>
 #include <linux/mount.h>
 #include <linux/sched/signal.h>
 #include <linux/uaccess.h>
 /* Hash a block, writing the result to the next level's pending block buffer. */
 static int hash_one_block(struct inode *inode,
                          const struct merkle_tree_params *params,
-                         struct ahash_request *req, struct block_buffer *cur)
+                         struct block_buffer *cur)
 {
        struct block_buffer *next = cur + 1;
        int err;
        /* Zero-pad the block if it's shorter than the block size. */
        memset(&cur->data[cur->filled], 0, params->block_size - cur->filled);
 
-       err = fsverity_hash_block(params, inode, req, virt_to_page(cur->data),
-                                 offset_in_page(cur->data),
+       err = fsverity_hash_block(params, inode, cur->data,
                                  &next->data[next->filled]);
        if (err)
                return err;
        struct inode *inode = file_inode(filp);
        const u64 data_size = inode->i_size;
        const int num_levels = params->num_levels;
-       struct ahash_request *req;
        struct block_buffer _buffers[1 + FS_VERITY_MAX_LEVELS + 1] = {};
        struct block_buffer *buffers = &_buffers[1];
        unsigned long level_offset[FS_VERITY_MAX_LEVELS];
                return 0;
        }
 
-       /* This allocation never fails, since it's mempool-backed. */
-       req = fsverity_alloc_hash_request(params->hash_alg, GFP_KERNEL);
-
        /*
         * Allocate the block buffers.  Buffer "-1" is for data blocks.
         * Buffers 0 <= level < num_levels are for the actual tree levels.
                        fsverity_err(inode, "Short read of file data");
                        goto out;
                }
-               err = hash_one_block(inode, params, req, &buffers[-1]);
+               err = hash_one_block(inode, params, &buffers[-1]);
                if (err)
                        goto out;
                for (level = 0; level < num_levels; level++) {
                        if (buffers[level].filled + params->digest_size <=
                            params->block_size) {
                                /* Next block at @level isn't full yet */
                                break;
                        }
                        /* Next block at @level is full */
 
-                       err = hash_one_block(inode, params, req,
-                                            &buffers[level]);
+                       err = hash_one_block(inode, params, &buffers[level]);
                        if (err)
                                goto out;
                        err = write_merkle_tree_block(inode,
        /* Finish all nonempty pending tree blocks. */
        for (level = 0; level < num_levels; level++) {
                if (buffers[level].filled != 0) {
-                       err = hash_one_block(inode, params, req,
-                                            &buffers[level]);
+                       err = hash_one_block(inode, params, &buffers[level]);
                        if (err)
                                goto out;
                        err = write_merkle_tree_block(inode,
 out:
        for (level = -1; level < num_levels; level++)
                kfree(buffers[level].data);
-       fsverity_free_hash_request(params->hash_alg, req);
        return err;
 }
 
 
diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h
 #define pr_fmt(fmt) "fs-verity: " fmt
 
 #include <linux/fsverity.h>
-#include <linux/mempool.h>
-
-struct ahash_request;
 
 /*
  * Implementation limit: maximum depth of the Merkle tree.  For now 8 is plenty;
 
 /* A hash algorithm supported by fs-verity */
 struct fsverity_hash_alg {
-       struct crypto_ahash *tfm; /* hash tfm, allocated on demand */
+       struct crypto_shash *tfm; /* hash tfm, allocated on demand */
        const char *name;         /* crypto API name, e.g. sha256 */
        unsigned int digest_size; /* digest size in bytes, e.g. 32 for SHA-256 */
        unsigned int block_size;  /* block size in bytes, e.g. 64 for SHA-256 */
-       mempool_t req_pool;       /* mempool with a preallocated hash request */
        /*
         * The HASH_ALGO_* constant for this algorithm.  This is different from
         * FS_VERITY_HASH_ALG_*, which uses a different numbering scheme.
 
 struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
                                                unsigned int num);
-struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg,
-                                                 gfp_t gfp_flags);
-void fsverity_free_hash_request(struct fsverity_hash_alg *alg,
-                               struct ahash_request *req);
 const u8 *fsverity_prepare_hash_state(struct fsverity_hash_alg *alg,
                                      const u8 *salt, size_t salt_size);
 int fsverity_hash_block(const struct merkle_tree_params *params,
-                       const struct inode *inode, struct ahash_request *req,
-                       struct page *page, unsigned int offset, u8 *out);
+                       const struct inode *inode, const void *data, u8 *out);
 int fsverity_hash_buffer(struct fsverity_hash_alg *alg,
                         const void *data, size_t size, u8 *out);
 void __init fsverity_check_hash_algs(void);
 
 #include "fsverity_private.h"
 
 #include <crypto/hash.h>
-#include <linux/scatterlist.h>
 
 /* The hash algorithms supported by fs-verity */
 struct fsverity_hash_alg fsverity_hash_algs[] = {
                                                unsigned int num)
 {
        struct fsverity_hash_alg *alg;
-       struct crypto_ahash *tfm;
+       struct crypto_shash *tfm;
        int err;
 
        if (num >= ARRAY_SIZE(fsverity_hash_algs) ||
        if (alg->tfm != NULL)
                goto out_unlock;
 
-       /*
-        * Using the shash API would make things a bit simpler, but the ahash
-        * API is preferable as it allows the use of crypto accelerators.
-        */
-       tfm = crypto_alloc_ahash(alg->name, 0, 0);
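+       /*
+        * The shash API suffices here: everything fs-verity hashes is in
+        * virtually contiguous kernel memory, and shash has less overhead
+        * than ahash for synchronous, CPU-based hashing.
+        */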
+       tfm = crypto_alloc_shash(alg->name, 0, 0);
        if (IS_ERR(tfm)) {
                if (PTR_ERR(tfm) == -ENOENT) {
                        fsverity_warn(inode,
        }
 
        err = -EINVAL;
-       if (WARN_ON_ONCE(alg->digest_size != crypto_ahash_digestsize(tfm)))
+       if (WARN_ON_ONCE(alg->digest_size != crypto_shash_digestsize(tfm)))
                goto err_free_tfm;
-       if (WARN_ON_ONCE(alg->block_size != crypto_ahash_blocksize(tfm)))
-               goto err_free_tfm;
-
-       err = mempool_init_kmalloc_pool(&alg->req_pool, 1,
-                                       sizeof(struct ahash_request) +
-                                       crypto_ahash_reqsize(tfm));
-       if (err)
+       if (WARN_ON_ONCE(alg->block_size != crypto_shash_blocksize(tfm)))
                goto err_free_tfm;
 
        pr_info("%s using implementation \"%s\"\n",
-               alg->name, crypto_ahash_driver_name(tfm));
+               alg->name, crypto_shash_driver_name(tfm));
 
        /* pairs with smp_load_acquire() above */
        smp_store_release(&alg->tfm, tfm);
        goto out_unlock;
 
 err_free_tfm:
-       crypto_free_ahash(tfm);
+       crypto_free_shash(tfm);
        alg = ERR_PTR(err);
 out_unlock:
        mutex_unlock(&fsverity_hash_alg_init_mutex);
        return alg;
 }
 
-/**
- * fsverity_alloc_hash_request() - allocate a hash request object
- * @alg: the hash algorithm for which to allocate the request
- * @gfp_flags: memory allocation flags
- *
- * This is mempool-backed, so this never fails if __GFP_DIRECT_RECLAIM is set in
- * @gfp_flags.  However, in that case this might need to wait for all
- * previously-allocated requests to be freed.  So to avoid deadlocks, callers
- * must never need multiple requests at a time to make forward progress.
- *
- * Return: the request object on success; NULL on failure (but see above)
- */
-struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg,
-                                                 gfp_t gfp_flags)
-{
-       struct ahash_request *req = mempool_alloc(&alg->req_pool, gfp_flags);
-
-       if (req)
-               ahash_request_set_tfm(req, alg->tfm);
-       return req;
-}
-
-/**
- * fsverity_free_hash_request() - free a hash request object
- * @alg: the hash algorithm
- * @req: the hash request object to free
- */
-void fsverity_free_hash_request(struct fsverity_hash_alg *alg,
-                               struct ahash_request *req)
-{
-       if (req) {
-               ahash_request_zero(req);
-               mempool_free(req, &alg->req_pool);
-       }
-}
-
 /**
  * fsverity_prepare_hash_state() - precompute the initial hash state
  * @alg: hash algorithm
                                      const u8 *salt, size_t salt_size)
 {
        u8 *hashstate = NULL;
-       struct ahash_request *req = NULL;
+       SHASH_DESC_ON_STACK(desc, alg->tfm);
        u8 *padded_salt = NULL;
        size_t padded_salt_size;
-       struct scatterlist sg;
-       DECLARE_CRYPTO_WAIT(wait);
        int err;
 
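+       /*
+        * SHASH_DESC_ON_STACK() only reserves stack space for the descriptor;
+        * the tfm pointer still has to be assigned before use.
+        */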
+       desc->tfm = alg->tfm;
+
        if (salt_size == 0)
                return NULL;
 
-       hashstate = kmalloc(crypto_ahash_statesize(alg->tfm), GFP_KERNEL);
+       hashstate = kmalloc(crypto_shash_statesize(alg->tfm), GFP_KERNEL);
        if (!hashstate)
                return ERR_PTR(-ENOMEM);
 
-       /* This allocation never fails, since it's mempool-backed. */
-       req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
-
        /*
         * Zero-pad the salt to the next multiple of the input size of the hash
         * algorithm's compression function, e.g. 64 bytes for SHA-256 or 128
                goto err_free;
        }
        memcpy(padded_salt, salt, salt_size);
-
-       sg_init_one(&sg, padded_salt, padded_salt_size);
-       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
-                                       CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  crypto_req_done, &wait);
-       ahash_request_set_crypt(req, &sg, NULL, padded_salt_size);
-
-       err = crypto_wait_req(crypto_ahash_init(req), &wait);
+       err = crypto_shash_init(desc);
        if (err)
                goto err_free;
 
-       err = crypto_wait_req(crypto_ahash_update(req), &wait);
+       err = crypto_shash_update(desc, padded_salt, padded_salt_size);
        if (err)
                goto err_free;
 
-       err = crypto_ahash_export(req, hashstate);
+       err = crypto_shash_export(desc, hashstate);
        if (err)
                goto err_free;
 out:
-       fsverity_free_hash_request(alg, req);
        kfree(padded_salt);
        return hashstate;
 
  * fsverity_hash_block() - hash a single data or hash block
  * @params: the Merkle tree's parameters
  * @inode: inode for which the hashing is being done
- * @req: preallocated hash request
- * @page: the page containing the block to hash
- * @offset: the offset of the block within @page
+ * @data: virtual address of a buffer containing the block to hash
  * @out: output digest, size 'params->digest_size' bytes
  *
  * Hash a single data or hash block.  The hash is salted if a salt is specified
  * in the Merkle tree parameters.
  *
  * Return: 0 on success, -errno on failure
  */
 int fsverity_hash_block(const struct merkle_tree_params *params,
-                       const struct inode *inode, struct ahash_request *req,
-                       struct page *page, unsigned int offset, u8 *out)
+                       const struct inode *inode, const void *data, u8 *out)
 {
-       struct scatterlist sg;
-       DECLARE_CRYPTO_WAIT(wait);
+       SHASH_DESC_ON_STACK(desc, params->hash_alg->tfm);
        int err;
 
-       sg_init_table(&sg, 1);
-       sg_set_page(&sg, page, params->block_size, offset);
-       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
-                                       CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  crypto_req_done, &wait);
-       ahash_request_set_crypt(req, &sg, out, params->block_size);
+       desc->tfm = params->hash_alg->tfm;
 
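+       /*
+        * If a salt is in use, resume from the precomputed salted hash state
+        * instead of rehashing the salt for every block.
+        */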
        if (params->hashstate) {
-               err = crypto_ahash_import(req, params->hashstate);
+               err = crypto_shash_import(desc, params->hashstate);
                if (err) {
                        fsverity_err(inode,
                                     "Error %d importing hash state", err);
                        return err;
                }
-               err = crypto_ahash_finup(req);
+               err = crypto_shash_finup(desc, data, params->block_size, out);
        } else {
-               err = crypto_ahash_digest(req);
+               err = crypto_shash_digest(desc, data, params->block_size, out);
        }
-
-       err = crypto_wait_req(err, &wait);
        if (err)
                fsverity_err(inode, "Error %d computing block hash", err);
        return err;
  * @size: size of data to hash, in bytes
  * @out: output digest, size 'alg->digest_size' bytes
  *
- * Hash some data which is located in physically contiguous memory (i.e. memory
- * allocated by kmalloc(), not by vmalloc()).  No salt is used.
- *
  * Return: 0 on success, -errno on failure
  */
 int fsverity_hash_buffer(struct fsverity_hash_alg *alg,
                         const void *data, size_t size, u8 *out)
 {
-       struct ahash_request *req;
-       struct scatterlist sg;
-       DECLARE_CRYPTO_WAIT(wait);
-       int err;
-
-       /* This allocation never fails, since it's mempool-backed. */
-       req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
-
-       sg_init_one(&sg, data, size);
-       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
-                                       CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  crypto_req_done, &wait);
-       ahash_request_set_crypt(req, &sg, out, size);
-
-       err = crypto_wait_req(crypto_ahash_digest(req), &wait);
-
-       fsverity_free_hash_request(alg, req);
-       return err;
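+       /* crypto_shash_tfm_digest() sets up a hash descriptor on the stack. */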
+       return crypto_shash_tfm_digest(alg->tfm, data, size, out);
 }
 
 void __init fsverity_check_hash_algs(void)
 
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
        return -EBADMSG;
 }
 
-static bool data_is_zeroed(struct inode *inode, struct page *page,
-                          unsigned int len, unsigned int offset)
-{
-       void *virt = kmap_local_page(page);
-
-       if (memchr_inv(virt + offset, 0, len)) {
-               kunmap_local(virt);
-               fsverity_err(inode,
-                            "FILE CORRUPTED!  Data past EOF is not zeroed");
-               return false;
-       }
-       kunmap_local(virt);
-       return true;
-}
-
 /*
  * Returns true if the hash block with index @hblock_idx in the tree, located in
  * @hpage, has already been verified.
  */
 static bool
 verify_data_block(struct inode *inode, struct fsverity_info *vi,
-                 struct ahash_request *req, struct page *data_page,
-                 u64 data_pos, unsigned int dblock_offset_in_page,
-                 unsigned long max_ra_pages)
+                 const void *data, u64 data_pos, unsigned long max_ra_pages)
 {
        const struct merkle_tree_params *params = &vi->tree_params;
        const unsigned int hsize = params->digest_size;
        struct {
                /* Page containing the hash block */
                struct page *page;
+               /* Mapped address of the hash block (will be within @page) */
+               const void *addr;
                /* Index of the hash block in the tree overall */
                unsigned long index;
-               /* Byte offset of the hash block within @page */
-               unsigned int offset_in_page;
-               /* Byte offset of the wanted hash within @page */
+               /* Byte offset of the wanted hash relative to @addr */
                unsigned int hoffset;
        } hblocks[FS_VERITY_MAX_LEVELS];
        /*
         * The index of the previous level's block within that level; also the
         * index of that block's hash within the current level.
         */
        u64 hidx = data_pos >> params->log_blocksize;
        int err;
 
+       /* Up to 1 + FS_VERITY_MAX_LEVELS pages may be mapped at once */
+       BUILD_BUG_ON(1 + FS_VERITY_MAX_LEVELS > KM_MAX_IDX);
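+       /* (KM_MAX_IDX is the per-task limit on nested kmap_local mappings.) */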
+
        if (unlikely(data_pos >= inode->i_size)) {
                /*
                 * This can happen in the data page spanning EOF when the Merkle
                 * tree block size is less than the page size.  The Merkle tree
                 * doesn't cover data blocks fully past EOF.  But the entire page
                 * spanning EOF can be visible to userspace via a mmap, and
                 * any part past EOF should be all zeroes.  Therefore, we need
                 * to verify that any data blocks fully past EOF are all zeroes.
                 */
-               return data_is_zeroed(inode, data_page, params->block_size,
-                                     dblock_offset_in_page);
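+               /* memchr_inv() returns non-NULL iff some byte is nonzero. */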
+               if (memchr_inv(data, 0, params->block_size)) {
+                       fsverity_err(inode,
+                                    "FILE CORRUPTED!  Data past EOF is not zeroed");
+                       return false;
+               }
+               return true;
        }
 
        /*
         * Starting at the leaf level, ascend the tree saving hash blocks along
         * the way until we find a hash block that has already been verified, or
         * until we reach the root.
         */
        for (level = 0; level < params->num_levels; level++) {
                unsigned long next_hidx;
                unsigned long hblock_idx;
                pgoff_t hpage_idx;
                unsigned int hblock_offset_in_page;
                unsigned int hoffset;
                struct page *hpage;
+               const void *haddr;
 
                /*
                 * The index of the block in the current level; also the index
                 * of that block's hash within the next level.
                 */
                next_hidx = hidx >> params->log_arity;
 
                /* Index of the hash block in the tree overall */
                hblock_idx = params->level_start[level] + next_hidx;
 
                /* Index of the hash page in the tree overall */
                hpage_idx = hblock_idx >> params->log_blocks_per_page;
 
                /* Byte offset of the hash block within the page */
                hblock_offset_in_page =
                        (hblock_idx << params->log_blocksize) & ~PAGE_MASK;
 
-               /* Byte offset of the hash within the page */
-               hoffset = hblock_offset_in_page +
-                         ((hidx << params->log_digestsize) &
-                          (params->block_size - 1));
+               /* Byte offset of the hash within the block */
+               hoffset = (hidx << params->log_digestsize) &
+                         (params->block_size - 1);
 
                hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
                                hpage_idx, level == 0 ? min(max_ra_pages,
                                     err, hpage_idx);
                        goto out;
                }
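+               /*
+                * Map the hash block.  If it's already verified, just copy out
+                * the wanted hash and unmap; otherwise keep it mapped until the
+                * descent phase verifies it.
+                */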
+               haddr = kmap_local_page(hpage) + hblock_offset_in_page;
                if (is_hash_block_verified(vi, hpage, hblock_idx)) {
-                       memcpy_from_page(_want_hash, hpage, hoffset, hsize);
+                       memcpy(_want_hash, haddr + hoffset, hsize);
                        want_hash = _want_hash;
+                       kunmap_local(haddr);
                        put_page(hpage);
                        goto descend;
                }
                hblocks[level].page = hpage;
+               hblocks[level].addr = haddr;
                hblocks[level].index = hblock_idx;
-               hblocks[level].offset_in_page = hblock_offset_in_page;
                hblocks[level].hoffset = hoffset;
                hidx = next_hidx;
        }
        /* Descend the tree verifying hash blocks. */
        for (; level > 0; level--) {
                struct page *hpage = hblocks[level - 1].page;
+               const void *haddr = hblocks[level - 1].addr;
                unsigned long hblock_idx = hblocks[level - 1].index;
-               unsigned int hblock_offset_in_page =
-                       hblocks[level - 1].offset_in_page;
                unsigned int hoffset = hblocks[level - 1].hoffset;
 
-               err = fsverity_hash_block(params, inode, req, hpage,
-                                         hblock_offset_in_page, real_hash);
+               err = fsverity_hash_block(params, inode, haddr, real_hash);
                if (err)
                        goto out;
                err = cmp_hashes(vi, want_hash, real_hash, data_pos, level - 1);
                        set_bit(hblock_idx, vi->hash_block_verified);
                else
                        SetPageChecked(hpage);
-               memcpy_from_page(_want_hash, hpage, hoffset, hsize);
+               memcpy(_want_hash, haddr + hoffset, hsize);
                want_hash = _want_hash;
+               kunmap_local(haddr);
                put_page(hpage);
        }
 
        /* Finally, verify the data block. */
-       err = fsverity_hash_block(params, inode, req, data_page,
-                                 dblock_offset_in_page, real_hash);
+       err = fsverity_hash_block(params, inode, data, real_hash);
        if (err)
                goto out;
        err = cmp_hashes(vi, want_hash, real_hash, data_pos, -1);
 out:
-       for (; level > 0; level--)
+       for (; level > 0; level--) {
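+               /* Unmap in reverse order, as kmap_local mappings are stacked. */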
+               kunmap_local(hblocks[level - 1].addr);
                put_page(hblocks[level - 1].page);
-
+       }
        return err == 0;
 }
 
 static bool
-verify_data_blocks(struct inode *inode, struct fsverity_info *vi,
-                  struct ahash_request *req, struct folio *data_folio,
+verify_data_blocks(struct inode *inode, struct folio *data_folio,
                   size_t len, size_t offset, unsigned long max_ra_pages)
 {
+       struct fsverity_info *vi = inode->i_verity_info;
        const unsigned int block_size = vi->tree_params.block_size;
        u64 pos = (u64)data_folio->index << PAGE_SHIFT;
 
                         folio_test_uptodate(data_folio)))
                return false;
        do {
-               struct page *data_page =
-                       folio_page(data_folio, offset >> PAGE_SHIFT);
-
-               if (!verify_data_block(inode, vi, req, data_page, pos + offset,
-                                      offset & ~PAGE_MASK, max_ra_pages))
+               void *data;
+               bool valid;
+
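+               /* Map each block individually; the folio may be in highmem. */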
+               data = kmap_local_folio(data_folio, offset);
+               valid = verify_data_block(inode, vi, data, pos + offset,
+                                         max_ra_pages);
+               kunmap_local(data);
+               if (!valid)
                        return false;
                offset += block_size;
                len -= block_size;
  */
 bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset)
 {
-       struct inode *inode = folio->mapping->host;
-       struct fsverity_info *vi = inode->i_verity_info;
-       struct ahash_request *req;
-       bool valid;
-
-       /* This allocation never fails, since it's mempool-backed. */
-       req = fsverity_alloc_hash_request(vi->tree_params.hash_alg, GFP_NOFS);
-
-       valid = verify_data_blocks(inode, vi, req, folio, len, offset, 0);
-
-       fsverity_free_hash_request(vi->tree_params.hash_alg, req);
-
-       return valid;
+       return verify_data_blocks(folio->mapping->host, folio, len, offset, 0);
 }
 EXPORT_SYMBOL_GPL(fsverity_verify_blocks);
 
 void fsverity_verify_bio(struct bio *bio)
 {
        struct inode *inode = bio_first_page_all(bio)->mapping->host;
-       struct fsverity_info *vi = inode->i_verity_info;
-       struct ahash_request *req;
        struct folio_iter fi;
        unsigned long max_ra_pages = 0;
 
-       /* This allocation never fails, since it's mempool-backed. */
-       req = fsverity_alloc_hash_request(vi->tree_params.hash_alg, GFP_NOFS);
-
        if (bio->bi_opf & REQ_RAHEAD) {
                /*
                 * If this bio is for data readahead, then we also do readahead
        }
 
        bio_for_each_folio_all(fi, bio) {
-               if (!verify_data_blocks(inode, vi, req, fi.folio, fi.length,
-                                       fi.offset, max_ra_pages)) {
+               if (!verify_data_blocks(inode, fi.folio, fi.length, fi.offset,
+                                       max_ra_pages)) {
                        bio->bi_status = BLK_STS_IOERR;
                        break;
                }
        }
-
-       fsverity_free_hash_request(vi->tree_params.hash_alg, req);
 }
 EXPORT_SYMBOL_GPL(fsverity_verify_bio);
 #endif /* CONFIG_BLOCK */