static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
 {
+       int ret;
+
        if (!force_dma_unencrypted(dev))
                return 0;
-       return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+       ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+       if (ret)
+               pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
+       return ret;
 }
 
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
@@ ... @@
        return ret;
 
 out_encrypt_pages:
-       /* If memory cannot be re-encrypted, it must be leaked */
        if (dma_set_encrypted(dev, page_address(page), size))
                return NULL;
 out_free_pages:
@@ ... @@
        } else {
                if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
                        arch_dma_clear_uncached(cpu_addr, size);
-               dma_set_encrypted(dev, cpu_addr, 1 << page_order);
+               if (dma_set_encrypted(dev, cpu_addr, 1 << page_order))
+                       return;
        }
 
        __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
@@ ... @@
            dma_free_from_pool(dev, vaddr, size))
                return;
 
-       dma_set_encrypted(dev, vaddr, 1 << page_order);
+       if (dma_set_encrypted(dev, vaddr, 1 << page_order))
+               return;
        __dma_direct_free_pages(dev, page, size);
 }
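
Not part of the patch itself, but to make the pattern concrete: the removed comment and the new pr_warn_ratelimited() both encode the same rule, that memory which cannot be re-encrypted must be leaked rather than handed back to the page allocator, since anything that later reuses those pages would otherwise touch memory left in the unencrypted state. Below is a minimal userspace sketch of that leak-on-failure pattern, with mprotect() standing in for set_memory_encrypted() and the hypothetical helpers alloc_shared()/free_shared() standing in for the DMA allocation paths; the kernel helpers themselves cannot be exercised outside the kernel.

/*
 * Userspace sketch only: if a buffer cannot be put back into its normal
 * protection state, warn and leak it instead of returning it to the
 * allocator, mirroring the dma_set_encrypted() error handling above.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void *alloc_shared(size_t size)
{
	/* Page-aligned anonymous mapping; stands in for the DMA allocation. */
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	return p == MAP_FAILED ? NULL : p;
}

static void free_shared(void *p, size_t size)
{
	/*
	 * Leak on failure: if the buffer cannot be "re-protected", do not
	 * unmap it, so nothing else can ever reuse it in the wrong state.
	 */
	if (mprotect(p, size, PROT_NONE)) {
		fprintf(stderr, "leaking buffer that can't be re-protected\n");
		return;
	}
	munmap(p, size);
}

int main(void)
{
	size_t size = (size_t)sysconf(_SC_PAGESIZE);
	void *p = alloc_shared(size);

	if (!p)
		return 1;
	free_shared(p, size);
	return 0;
}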