crypto: ccree - protect against empty or NULL scatterlists
authorGilad Ben-Yossef <gilad@benyossef.com>
Wed, 29 Jan 2020 14:37:54 +0000 (16:37 +0200)
committerHerbert Xu <herbert@gondor.apana.org.au>
Thu, 13 Feb 2020 09:05:24 +0000 (17:05 +0800)
Deal gracefully with a NULL or empty scatterlist which can happen
if both cryptlen and assoclen are zero and we're doing in-place
AEAD encryption.

This fixes a crash that occurs when we end up trying to map a NULL
page, at least on some platforms / DMA mapping configurations.

Cc: stable@vger.kernel.org # v4.19+
Reported-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/ccree/cc_buffer_mgr.c
drivers/crypto/ccree/cc_buffer_mgr.h

index a72586eccd81816571ddf17c8b07748cc9e5c50b..b938ceae7ae763b54e915d79dc87740ad4cdb92c 100644 (file)
@@ -87,6 +87,8 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
 {
        unsigned int nents = 0;
 
+       *lbytes = 0;
+
        while (nbytes && sg_list) {
                nents++;
                /* get the number of bytes in the last entry */
@@ -95,6 +97,7 @@ static unsigned int cc_get_sgl_nents(struct device *dev,
                                nbytes : sg_list->length;
                sg_list = sg_next(sg_list);
        }
+
        dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
        return nents;
 }
@@ -290,37 +293,25 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
                     unsigned int nbytes, int direction, u32 *nents,
                     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-       if (sg_is_last(sg)) {
-               /* One entry only case -set to DLLI */
-               if (dma_map_sg(dev, sg, 1, direction) != 1) {
-                       dev_err(dev, "dma_map_sg() single buffer failed\n");
-                       return -ENOMEM;
-               }
-               dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
-                       &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
-                       sg->offset, sg->length);
-               *lbytes = nbytes;
-               *nents = 1;
-               *mapped_nents = 1;
-       } else {  /*sg_is_last*/
-               *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
-               if (*nents > max_sg_nents) {
-                       *nents = 0;
-                       dev_err(dev, "Too many fragments. current %d max %d\n",
-                               *nents, max_sg_nents);
-                       return -ENOMEM;
-               }
-               /* In case of mmu the number of mapped nents might
-                * be changed from the original sgl nents
-                */
-               *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-               if (*mapped_nents == 0) {
-                       *nents = 0;
-                       dev_err(dev, "dma_map_sg() sg buffer failed\n");
-                       return -ENOMEM;
-               }
+       int ret = 0;
+
+       *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
+       if (*nents > max_sg_nents) {
+               *nents = 0;
+               dev_err(dev, "Too many fragments. current %d max %d\n",
+                       *nents, max_sg_nents);
+               return -ENOMEM;
        }
 
+       ret = dma_map_sg(dev, sg, *nents, direction);
+       if (dma_mapping_error(dev, ret)) {
+               *nents = 0;
+               dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
+               return -ENOMEM;
+       }
+
+       *mapped_nents = ret;
+
        return 0;
 }
 
@@ -555,11 +546,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
                sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
                areq_ctx->assoclen, req->cryptlen);
 
-       dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
+       dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
+                    DMA_BIDIRECTIONAL);
        if (req->src != req->dst) {
                dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
                        sg_virt(req->dst));
-               dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+               dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
                             DMA_BIDIRECTIONAL);
        }
        if (drvdata->coherent &&
@@ -881,7 +873,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
                                            &src_last_bytes);
        sg_index = areq_ctx->src_sgl->length;
        //check where the data starts
-       while (sg_index <= size_to_skip) {
+       while (src_mapped_nents && (sg_index <= size_to_skip)) {
                src_mapped_nents--;
                offset -= areq_ctx->src_sgl->length;
                sgl = sg_next(areq_ctx->src_sgl);
@@ -908,7 +900,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
                        size_for_map += crypto_aead_ivsize(tfm);
 
                rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
-                              &areq_ctx->dst.nents,
+                              &areq_ctx->dst.mapped_nents,
                               LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
                               &dst_mapped_nents);
                if (rc)
@@ -921,7 +913,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
        offset = size_to_skip;
 
        //check where the data starts
-       while (sg_index <= size_to_skip) {
+       while (dst_mapped_nents && sg_index <= size_to_skip) {
                dst_mapped_nents--;
                offset -= areq_ctx->dst_sgl->length;
                sgl = sg_next(areq_ctx->dst_sgl);
@@ -1123,7 +1115,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
        if (is_gcm4543)
                size_to_map += crypto_aead_ivsize(tfm);
        rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
-                      &areq_ctx->src.nents,
+                      &areq_ctx->src.mapped_nents,
                       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
                        LLI_MAX_NUM_OF_DATA_ENTRIES),
                       &dummy, &mapped_nents);
index af434872c6ff7ec75a8e974ee657aaff8885b43f..827b6cb1236e8a65c47ae05edae7653523413fd5 100644 (file)
@@ -25,6 +25,7 @@ enum cc_sg_cpy_direct {
 
 struct cc_mlli {
        cc_sram_addr_t sram_addr;
+       unsigned int mapped_nents;
        unsigned int nents; //sg nents
        unsigned int mlli_nents; //mlli nents might be different than the above
 };