dma-direct: clean up the remapping checks in dma_direct_alloc
author Christoph Hellwig <hch@lst.de>
Tue, 9 Nov 2021 14:20:40 +0000 (15:20 +0100)
committer Christoph Hellwig <hch@lst.de>
Tue, 7 Dec 2021 11:48:09 +0000 (12:48 +0100)
Add two local variables to track whether we want to remap the returned
address using vmap or to call dma_set_uncached on it, and use them to
simplify the code flow.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
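
[Editor's note: for orientation before the diff, the net effect of the
change is a control flow of the following shape. This is a condensed
sketch of the resulting dma_direct_alloc() body, not the literal
upstream code; the allocation step, attrs handling, and the
error-unwinding labels (out_free_pages, out_encrypt_pages) are elided.]

	void *dma_direct_alloc(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
	{
		bool remap = false, set_uncached = false;
		struct page *page;
		void *ret;

		/* ... allocate the backing page(s) ... */

		/* Decide the post-allocation treatment up front. */
		if (!dev_is_dma_coherent(dev) &&
		    IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
			remap = true;	/* build an uncached vmap alias */
		else if (PageHighMem(page))
			remap = true;	/* no kernel direct mapping to hand
					   out; fails earlier if
					   CONFIG_DMA_REMAP is disabled */
		else if (!dev_is_dma_coherent(dev) &&
			 IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
			set_uncached = true;	/* change attributes in place */

		if (remap) {
			/* remove any dirty cache lines on the kernel alias */
			arch_dma_prep_coherent(page, size);
			ret = dma_common_contiguous_remap(page, size,
					dma_pgprot(dev, PAGE_KERNEL, attrs),
					__builtin_return_address(0));
		} else {
			ret = page_address(page);
			dma_set_decrypted(dev, ret, size);
		}

		/* single zeroing point; the old "done:" label goes away */
		memset(ret, 0, size);

		if (set_uncached) {
			arch_dma_prep_coherent(page, size);
			ret = arch_dma_set_uncached(ret, size);
		}

		*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
		return ret;
	}

The point of the two flags is that the remapped and linear-mapped paths
now converge on a single memset() and a single exit path, which is what
allows the old done: label and the duplicated highmem handling to be
dropped.
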
kernel/dma/direct.c

index d7a489be4847030d2f6eebc64f30e140cf56b5e9..3d1718dc077e9e180ef64886620aa6e24ebe4470 100644
@@ -171,6 +171,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
+       bool remap = false, set_uncached = false;
        struct page *page;
        void *ret;
 
@@ -222,9 +223,25 @@ void *dma_direct_alloc(struct device *dev, size_t size,
        if (!page)
                return NULL;
 
-       if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-            !dev_is_dma_coherent(dev)) ||
-           (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+       if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
+               remap = true;
+       } else if (PageHighMem(page)) {
+               /*
+                * Depending on the cma= arguments and per-arch setup,
+                * dma_alloc_contiguous could return highmem pages.
+                * Without remapping there is no way to return them here, so
+                * log an error and fail.
+                */
+               if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
+                       dev_info(dev, "Rejecting highmem page from CMA.\n");
+                       goto out_free_pages;
+               }
+               remap = true;
+       } else if (!dev_is_dma_coherent(dev) &&
+                  IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
+               set_uncached = true;
+
+       if (remap) {
                /* remove any dirty cache lines on the kernel alias */
                arch_dma_prep_coherent(page, size);
 
@@ -234,34 +251,21 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
-               memset(ret, 0, size);
-               goto done;
-       }
-
-       if (PageHighMem(page)) {
-               /*
-                * Depending on the cma= arguments and per-arch setup
-                * dma_alloc_contiguous could return highmem pages.
-                * Without remapping there is no way to return them here,
-                * so log an error and fail.
-                */
-               dev_info(dev, "Rejecting highmem page from CMA.\n");
-               goto out_free_pages;
+       } else {
+               ret = page_address(page);
+               if (dma_set_decrypted(dev, ret, size))
+                       goto out_free_pages;
        }
 
-       ret = page_address(page);
-       if (dma_set_decrypted(dev, ret, size))
-               goto out_free_pages;
        memset(ret, 0, size);
 
-       if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-           !dev_is_dma_coherent(dev)) {
+       if (set_uncached) {
                arch_dma_prep_coherent(page, size);
                ret = arch_dma_set_uncached(ret, size);
                if (IS_ERR(ret))
                        goto out_encrypt_pages;
        }
-done:
+
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;