swiotlb: use the right nslabs-derived sizes in swiotlb_init_late
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Wed, 11 May 2022 06:24:10 +0000 (08:24 +0200)
Committer:  Christoph Hellwig <hch@lst.de>
CommitDate: Fri, 13 May 2022 10:49:27 +0000 (12:49 +0200)
nslabs can shrink when the allocation or the remap does not succeed, so
make sure to use it for all sizing.  To do that, remove the bytes value,
which can become stale, and replace it with local calculations plus a
boolean that indicates whether the originally requested size could not
be allocated.

Fixes: 6424e31b1c05 ("swiotlb: remove swiotlb_init_with_tbl and swiotlb_init_late_with_tbl")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
kernel/dma/swiotlb.c

index 113e1e8aaca37d5470c32934d7b193a1ac7ee768..d6e62a6a42ceb84fa6da1d6ea021e708c74493d6 100644 (file)
@@ -297,9 +297,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 {
        struct io_tlb_mem *mem = &io_tlb_default_mem;
        unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
-       unsigned long bytes;
        unsigned char *vstart = NULL;
        unsigned int order;
+       bool retried = false;
        int rc = 0;
 
        if (swiotlb_force_disable)
@@ -308,7 +308,6 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 retry:
        order = get_order(nslabs << IO_TLB_SHIFT);
        nslabs = SLABS_PER_PAGE << order;
-       bytes = nslabs << IO_TLB_SHIFT;
 
        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
@@ -316,16 +315,13 @@ retry:
                if (vstart)
                        break;
                order--;
+               nslabs = SLABS_PER_PAGE << order;
+               retried = true;
        }
 
        if (!vstart)
                return -ENOMEM;
 
-       if (order != get_order(bytes)) {
-               pr_warn("only able to allocate %ld MB\n",
-                       (PAGE_SIZE << order) >> 20);
-               nslabs = SLABS_PER_PAGE << order;
-       }
        if (remap)
                rc = remap(vstart, nslabs);
        if (rc) {
@@ -334,9 +330,15 @@ retry:
                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
                if (nslabs < IO_TLB_MIN_SLABS)
                        return rc;
+               retried = true;
                goto retry;
        }
 
+       if (retried) {
+               pr_warn("only able to allocate %ld MB\n",
+                       (PAGE_SIZE << order) >> 20);
+       }
+
        mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                get_order(array_size(sizeof(*mem->slots), nslabs)));
        if (!mem->slots) {
@@ -344,7 +346,8 @@ retry:
                return -ENOMEM;
        }
 
-       set_memory_decrypted((unsigned long)vstart, bytes >> PAGE_SHIFT);
+       set_memory_decrypted((unsigned long)vstart,
+                            (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
        swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
 
        swiotlb_print_info();