crypto: qat - extend buffer list logic interface
author: Lucas Segarra Fernandez <lucas.segarra.fernandez@intel.com>
Mon, 23 Jan 2023 10:42:21 +0000 (11:42 +0100)
committer: Herbert Xu <herbert@gondor.apana.org.au>
Fri, 3 Feb 2023 04:54:54 +0000 (12:54 +0800)
Extend qat_bl_sgl_to_bufl() to allow skipping the mapping of a region
of the source and the destination scatter lists starting from byte
zero.

This is to support the ZLIB format (RFC 1950) in the qat driver.
The ZLIB format is made of deflate compressed data surrounded by a
header and a footer. The QAT accelerators support only the deflate
algorithm, therefore the header should not be mapped since it is
inserted in software.

Signed-off-by: Lucas Segarra Fernandez <lucas.segarra.fernandez@intel.com>
Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/qat/qat_common/qat_bl.c
drivers/crypto/qat/qat_common/qat_bl.h
drivers/crypto/qat/qat_common/qat_comp_algs.c

index c72831fa025d5bf9e790104c7d6f03a456bb7d88..76baed0a76c0ee9386e9c14b60315026be6b532a 100644 (file)
@@ -53,6 +53,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
                                struct qat_request_buffs *buf,
                                dma_addr_t extra_dst_buff,
                                size_t sz_extra_dst_buff,
+                               unsigned int sskip,
+                               unsigned int dskip,
                                gfp_t flags)
 {
        struct device *dev = &GET_DEV(accel_dev);
@@ -65,6 +67,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
        struct scatterlist *sg;
        size_t sz_out, sz = struct_size(bufl, buffers, n);
        int node = dev_to_node(&GET_DEV(accel_dev));
+       unsigned int left;
        int bufl_dma_dir;
 
        if (unlikely(!n))
@@ -88,19 +91,29 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
        for (i = 0; i < n; i++)
                bufl->buffers[i].addr = DMA_MAPPING_ERROR;
 
+       left = sskip;
+
        for_each_sg(sgl, sg, n, i) {
                int y = sg_nctr;
 
                if (!sg->length)
                        continue;
 
-               bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
-                                                      sg->length,
+               if (left >= sg->length) {
+                       left -= sg->length;
+                       continue;
+               }
+               bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+                                                      sg->length - left,
                                                       bufl_dma_dir);
                bufl->buffers[y].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
                        goto err_in;
                sg_nctr++;
+               if (left) {
+                       bufl->buffers[y].len -= left;
+                       left = 0;
+               }
        }
        bufl->num_bufs = sg_nctr;
        blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
@@ -117,6 +130,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 
                n = n_sglout + extra_buff;
                sz_out = struct_size(buflout, buffers, n);
+               left = dskip;
+
                sg_nctr = 0;
 
                if (n > QAT_MAX_BUFF_DESC) {
@@ -139,13 +154,21 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
                        if (!sg->length)
                                continue;
 
-                       buffers[y].addr = dma_map_single(dev, sg_virt(sg),
-                                                        sg->length,
+                       if (left >= sg->length) {
+                               left -= sg->length;
+                               continue;
+                       }
+                       buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+                                                        sg->length - left,
                                                         DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
                                goto err_out;
                        buffers[y].len = sg->length;
                        sg_nctr++;
+                       if (left) {
+                               buffers[y].len -= left;
+                               left = 0;
+                       }
                }
                if (extra_buff) {
                        buffers[sg_nctr].addr = extra_dst_buff;
@@ -212,15 +235,19 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 {
        dma_addr_t extra_dst_buff = 0;
        size_t sz_extra_dst_buff = 0;
+       unsigned int sskip = 0;
+       unsigned int dskip = 0;
 
        if (params) {
                extra_dst_buff = params->extra_dst_buff;
                sz_extra_dst_buff = params->sz_extra_dst_buff;
+               sskip = params->sskip;
+               dskip = params->dskip;
        }
 
        return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
                                    extra_dst_buff, sz_extra_dst_buff,
-                                   flags);
+                                   sskip, dskip, flags);
 }
 
 static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
index 1479fef3b634f858ee7423244ceddc3315ce3181..d87e4f35ac395c768dd4f57ecf25a85f862c9352 100644 (file)
@@ -42,6 +42,8 @@ struct qat_request_buffs {
 struct qat_sgl_to_bufl_params {
        dma_addr_t extra_dst_buff;
        size_t sz_extra_dst_buff;
+       unsigned int sskip;
+       unsigned int dskip;
 };
 
 void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
index 1480d36a8d2bb152b134ebe5f72fc6fdb537bbf9..12d5e0fc3a95aa4a06018f119024e295541c77cd 100644 (file)
@@ -233,6 +233,9 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq,
        size_t ovf_buff_sz;
        int ret;
 
+       params.sskip = 0;
+       params.dskip = 0;
+
        if (!areq->src || !slen)
                return -EINVAL;