nouveau/dmem: handle kcalloc() allocation failure
authorDuoming Zhou <duoming@zju.edu.cn>
Wed, 6 Mar 2024 05:01:04 +0000 (13:01 +0800)
committerDanilo Krummrich <dakr@redhat.com>
Fri, 8 Mar 2024 16:36:28 +0000 (17:36 +0100)
The kcalloc() calls in nouveau_dmem_evict_chunk() return NULL when
physical memory is exhausted. Dereferencing src_pfns, dst_pfns or
dma_addrs in that case leads to NULL pointer dereferences.

Moreover, the GPU is going away at this point: if the allocation fails,
we cannot evict all pages mapping the chunk. Hence, add __GFP_NOFAIL
to the allocation flags.

Finally, as physically contiguous memory is not required, switch
kcalloc() to kvcalloc() so the allocations are more likely to succeed.

CC: <stable@vger.kernel.org> # v6.1
Fixes: 249881232e14 ("nouveau/dmem: evict device private memory during release")
Suggested-by: Danilo Krummrich <dakr@redhat.com>
Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240306050104.11259-1-duoming@zju.edu.cn
drivers/gpu/drm/nouveau/nouveau_dmem.c

index 12feecf71e752de075eaa94d7d3de9ec5a9e082b..6fb65b01d778049f9166681e26a7fdf85891e475 100644 (file)
@@ -378,9 +378,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
        dma_addr_t *dma_addrs;
        struct nouveau_fence *fence;
 
-       src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
-       dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
-       dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+       src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+       dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+       dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
 
        migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
                        npages);
@@ -406,11 +406,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
        migrate_device_pages(src_pfns, dst_pfns, npages);
        nouveau_dmem_fence_done(&fence);
        migrate_device_finalize(src_pfns, dst_pfns, npages);
-       kfree(src_pfns);
-       kfree(dst_pfns);
+       kvfree(src_pfns);
+       kvfree(dst_pfns);
        for (i = 0; i < npages; i++)
                dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
-       kfree(dma_addrs);
+       kvfree(dma_addrs);
 }
 
 void