crypto: hisilicon/hpre - fix unmapping invalid dma address
author Hui Tang <tanghui20@huawei.com>
Mon, 10 May 2021 09:02:55 +0000 (17:02 +0800)
committer Herbert Xu <herbert@gondor.apana.org.au>
Fri, 21 May 2021 08:17:30 +0000 (16:17 +0800)
Currently, an invalid DMA address may be unmapped when 'xx_data_clr_all' is
called on the error path, so check whether the DMA addresses taken from the
sqe 'in'/'out' fields were actually mapped before calling 'dma_free_coherent'
or 'dma_unmap_single'.

Fixes: a9214b0b6ed2 ("crypto: hisilicon - fix the check on dma address")
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/hisilicon/hpre/hpre_crypto.c

index c07a7f52d857ea63713a1679258efc254b4c54fb..db00e9f763da002a9e94fd593112b7d4b835fb08 100644
@@ -298,6 +298,8 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
        dma_addr_t tmp;
 
        tmp = le64_to_cpu(sqe->in);
+       if (unlikely(dma_mapping_error(dev, tmp)))
+               return;
 
        if (src) {
                if (req->src)
@@ -307,6 +309,8 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
        }
 
        tmp = le64_to_cpu(sqe->out);
+       if (unlikely(dma_mapping_error(dev, tmp)))
+               return;
 
        if (req->dst) {
                if (dst)
@@ -524,6 +528,8 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
                msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
        }
 
+       msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+       msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
        msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
        msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
        h_req->ctx = ctx;
@@ -1372,11 +1378,15 @@ static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
        dma_addr_t dma;
 
        dma = le64_to_cpu(sqe->in);
+       if (unlikely(dma_mapping_error(dev, dma)))
+               return;
 
        if (src && req->src)
                dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
 
        dma = le64_to_cpu(sqe->out);
+       if (unlikely(dma_mapping_error(dev, dma)))
+               return;
 
        if (req->dst)
                dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
@@ -1431,6 +1441,8 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
        h_req->areq.ecdh = req;
        msg = &h_req->req;
        memset(msg, 0, sizeof(*msg));
+       msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+       msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
        msg->key = cpu_to_le64(ctx->ecdh.dma_p);
 
        msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
@@ -1667,11 +1679,15 @@ static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
        dma_addr_t dma;
 
        dma = le64_to_cpu(sqe->in);
+       if (unlikely(dma_mapping_error(dev, dma)))
+               return;
 
        if (src && req->src)
                dma_free_coherent(dev, ctx->key_sz, req->src, dma);
 
        dma = le64_to_cpu(sqe->out);
+       if (unlikely(dma_mapping_error(dev, dma)))
+               return;
 
        if (req->dst)
                dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
@@ -1722,6 +1738,8 @@ static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
        h_req->areq.curve25519 = req;
        msg = &h_req->req;
        memset(msg, 0, sizeof(*msg));
+       msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+       msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
        msg->key = cpu_to_le64(ctx->curve25519.dma_p);
 
        msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
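
For readers unfamiliar with the pattern, a minimal sketch of the idea is shown
below. The names used here ('demo_sqe', 'demo_msg_set', 'demo_hw_data_clr') are
hypothetical stand-ins, not the driver's actual symbols; DMA_MAPPING_ERROR,
dma_mapping_error(), dma_free_coherent(), cpu_to_le64() and le64_to_cpu() are
the real kernel APIs the patch relies on.

/*
 * Simplified illustration of the fix: seed the sqe DMA fields with
 * DMA_MAPPING_ERROR at setup time, then have the error-path cleanup
 * refuse to release any address that was never actually mapped.
 * Hypothetical names, coherent-buffer case only.
 */
#include <linux/dma-mapping.h>
#include <linux/types.h>

struct demo_sqe {
        __le64 in;
        __le64 out;
};

/* Request setup: mark both DMA fields as "not mapped yet". */
static void demo_msg_set(struct demo_sqe *sqe)
{
        sqe->in = cpu_to_le64(DMA_MAPPING_ERROR);
        sqe->out = cpu_to_le64(DMA_MAPPING_ERROR);
}

/* Error-path cleanup: only free addresses that were really mapped. */
static void demo_hw_data_clr(struct device *dev, struct demo_sqe *sqe,
                             void *src, void *dst, size_t len)
{
        dma_addr_t tmp;

        tmp = le64_to_cpu(sqe->in);
        if (unlikely(dma_mapping_error(dev, tmp)))
                return;
        if (src)
                dma_free_coherent(dev, len, src, tmp);

        tmp = le64_to_cpu(sqe->out);
        if (unlikely(dma_mapping_error(dev, tmp)))
                return;
        if (dst)
                dma_free_coherent(dev, len, dst, tmp);
}

Seeding the fields with DMA_MAPPING_ERROR up front means the cleanup helper can
bail out safely no matter how far the mapping code got before failing, which is
why both the '*_msg_request_set' and '*_hw_data_clr_all' paths are touched by
the patch.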