crypto: inside-secure - fix use of the SG list
authorAntoine Tenart <antoine.tenart@bootlin.com>
Mon, 27 May 2019 14:51:05 +0000 (16:51 +0200)
committerHerbert Xu <herbert@gondor.apana.org.au>
Thu, 6 Jun 2019 06:38:56 +0000 (14:38 +0800)
Replace sg_nents_for_len by sg_nents when DMA mapping/unmapping buffers
and when looping over the SG entries. This fixes cases where the SG
entries aren't fully used, which in such cases led to using fewer
SG entries than needed (and thus the engine wouldn't have access to the
full input data and the result would be wrong).

Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/inside-secure/safexcel_cipher.c
drivers/crypto/inside-secure/safexcel_hash.c

index cedfb121c27859c9cd32bc769b56d957e4c6baa8..6e193baccec73e2bfb32c068189c8bb5d7a0297b 100644 (file)
@@ -369,16 +369,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
        safexcel_complete(priv, ring);
 
        if (src == dst) {
-               dma_unmap_sg(priv->dev, src,
-                            sg_nents_for_len(src, cryptlen),
-                            DMA_BIDIRECTIONAL);
+               dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL);
        } else {
-               dma_unmap_sg(priv->dev, src,
-                            sg_nents_for_len(src, cryptlen),
-                            DMA_TO_DEVICE);
-               dma_unmap_sg(priv->dev, dst,
-                            sg_nents_for_len(dst, cryptlen),
-                            DMA_FROM_DEVICE);
+               dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE);
+               dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
        }
 
        *should_complete = true;
@@ -403,26 +397,21 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
        int i, ret = 0;
 
        if (src == dst) {
-               nr_src = dma_map_sg(priv->dev, src,
-                                   sg_nents_for_len(src, totlen),
+               nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
                                    DMA_BIDIRECTIONAL);
                nr_dst = nr_src;
                if (!nr_src)
                        return -EINVAL;
        } else {
-               nr_src = dma_map_sg(priv->dev, src,
-                                   sg_nents_for_len(src, totlen),
+               nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
                                    DMA_TO_DEVICE);
                if (!nr_src)
                        return -EINVAL;
 
-               nr_dst = dma_map_sg(priv->dev, dst,
-                                   sg_nents_for_len(dst, totlen),
+               nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst),
                                    DMA_FROM_DEVICE);
                if (!nr_dst) {
-                       dma_unmap_sg(priv->dev, src,
-                                    sg_nents_for_len(src, totlen),
-                                    DMA_TO_DEVICE);
+                       dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
                        return -EINVAL;
                }
        }
@@ -472,7 +461,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 
        /* result descriptors */
        for_each_sg(dst, sg, nr_dst, i) {
-               bool first = !i, last = (i == nr_dst - 1);
+               bool first = !i, last = sg_is_last(sg);
                u32 len = sg_dma_len(sg);
 
                rdesc = safexcel_add_rdesc(priv, ring, first, last,
@@ -501,16 +490,10 @@ cdesc_rollback:
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 
        if (src == dst) {
-               dma_unmap_sg(priv->dev, src,
-                            sg_nents_for_len(src, totlen),
-                            DMA_BIDIRECTIONAL);
+               dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL);
        } else {
-               dma_unmap_sg(priv->dev, src,
-                            sg_nents_for_len(src, totlen),
-                            DMA_TO_DEVICE);
-               dma_unmap_sg(priv->dev, dst,
-                            sg_nents_for_len(dst, totlen),
-                            DMA_FROM_DEVICE);
+               dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
+               dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE);
        }
 
        return ret;
index 20950744ea4e263166ee75b06478489b21fa2b2d..a80a5e757b1f49820fec1487fc6c0011b02b6fde 100644 (file)
@@ -273,8 +273,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
        }
 
        /* Now handle the current ahash request buffer(s) */
-       req->nents = dma_map_sg(priv->dev, areq->src,
-                               sg_nents_for_len(areq->src, areq->nbytes),
+       req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src),
                                DMA_TO_DEVICE);
        if (!req->nents) {
                ret = -ENOMEM;