crypto: hisilicon/qm - fix use of 'dma_map_single'
author	Weili Qian <qianweili@huawei.com>
	Thu, 4 Feb 2021 08:59:35 +0000 (16:59 +0800)
committer	Herbert Xu <herbert@gondor.apana.org.au>
	Wed, 10 Feb 2021 06:55:59 +0000 (17:55 +1100)
Call 'dma_map_single' only after the data has been written, so that
the CPU cache and the DMA mapping are consistent.
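
In short, each context descriptor must be fully populated before it is
mapped, because dma_map_single() with DMA_TO_DEVICE performs its cache
maintenance on the buffer contents as they are at mapping time; on a
non-coherent system, CPU writes made after the mapping may never reach
the device unless dma_sync_single_for_device() is called. A minimal
sketch of the corrected ordering, condensed from qm_sq_ctx_cfg() in the
diff below (most field writes omitted):

        /* 1. Allocate and fully populate the SQC in cacheable memory. */
        sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
        if (!sqc)
                return -ENOMEM;
        INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
        /* ... remaining SQC fields are written here ... */

        /* 2. Map for device reads only after the last CPU write. */
        sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
                                 DMA_TO_DEVICE);
        if (dma_mapping_error(dev, sqc_dma)) {
                kfree(sqc);
                return -ENOMEM;
        }

        /* 3. Hand the DMA address to the mailbox, then unmap and free. */
        ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
        dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
        kfree(sqc);
        return ret;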

Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/hisilicon/qm.c

index 904b99a2244271e06734e5bbcafd57431e7b5c59..93d4a21cf825b910d3ba9966de6a176920e8176d 100644
@@ -1747,12 +1747,6 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
        sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
        if (!sqc)
                return -ENOMEM;
-       sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
-                                DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, sqc_dma)) {
-               kfree(sqc);
-               return -ENOMEM;
-       }
 
        INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
        if (ver == QM_HW_V1) {
@@ -1765,6 +1759,13 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
        sqc->cq_num = cpu_to_le16(qp_id);
        sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
 
+       sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
+                                DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, sqc_dma)) {
+               kfree(sqc);
+               return -ENOMEM;
+       }
+
        ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
        dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
        kfree(sqc);
@@ -1784,12 +1785,6 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
        cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
        if (!cqc)
                return -ENOMEM;
-       cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
-                                DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, cqc_dma)) {
-               kfree(cqc);
-               return -ENOMEM;
-       }
 
        INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
        if (ver == QM_HW_V1) {
@@ -1802,6 +1797,13 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
        }
        cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
 
+       cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
+                                DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, cqc_dma)) {
+               kfree(cqc);
+               return -ENOMEM;
+       }
+
        ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
        dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
        kfree(cqc);
@@ -2558,15 +2560,9 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
        dma_addr_t eqc_dma;
        int ret;
 
        eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
        if (!eqc)
                return -ENOMEM;
-       eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
-                                DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, eqc_dma)) {
-               kfree(eqc);
-               return -ENOMEM;
-       }
 
        eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
        eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
@@ -2574,6 +2570,13 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
                eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
        eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
 
+       eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
+                                DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, eqc_dma)) {
+               kfree(eqc);
+               return -ENOMEM;
+       }
+
        ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
        dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
        kfree(eqc);
@@ -2591,6 +2594,11 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
        aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
        if (!aeqc)
                return -ENOMEM;
+
+       aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
+       aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
+       aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+
        aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(dev, aeqc_dma)) {
@@ -2598,10 +2606,6 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
                return -ENOMEM;
        }
 
-       aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
-       aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
-       aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
-
        ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
        dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
        kfree(aeqc);