iommufd: Improve a few unclear bits of code
author Jason Gunthorpe <jgg@nvidia.com>
Wed, 7 Dec 2022 20:44:42 +0000 (16:44 -0400)
committer Jason Gunthorpe <jgg@nvidia.com>
Fri, 9 Dec 2022 19:20:37 +0000 (15:20 -0400)
Correct a few items noticed late in review:

 - We should assert that the math in batch_clear_carry() doesn't underflow

 - user->locked should be -1 not 0 since we just did mmput

 - npages should not have been recalculated; it already has that value

No functional change.

Fixes: 8d160cd4d506 ("iommufd: Algorithms for PFN storage")
Link: https://lore.kernel.org/r/2-v1-0362a1a1c034+98-iommufd_fixes1_jgg@nvidia.com
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reported-by: Binbin Wu <binbin.wu@linux.intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/iommu/iommufd/pages.c

index fccdba782cb699c671cc6ff473059bcf916484b0..c771772296485f06ef93b696d058ea0f5dd785bc 100644
@@ -289,6 +289,10 @@ static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)
        if (!keep_pfns)
                return batch_clear(batch);
 
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+               WARN_ON(!batch->end ||
+                       batch->npfns[batch->end - 1] < keep_pfns);
+
        batch->total_pfns = keep_pfns;
        batch->npfns[0] = keep_pfns;
        batch->pfns[0] = batch->pfns[batch->end - 1] +
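
For context, the new WARN_ON (compiled in only when CONFIG_IOMMUFD_TEST is
enabled) guards the carry math directly below it. A minimal sketch of
batch_clear_carry() with this change applied; the tail of the function is not
visible in the hunk above and is reconstructed here as an assumption:

    static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)
    {
            if (!keep_pfns)
                    return batch_clear(batch);

            /* Test-only sanity check for the unsigned math below */
            if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
                    WARN_ON(!batch->end ||
                            batch->npfns[batch->end - 1] < keep_pfns);

            batch->total_pfns = keep_pfns;
            batch->npfns[0] = keep_pfns;
            /*
             * If keep_pfns exceeded the last entry's count, this unsigned
             * subtraction would underflow; that is what the WARN_ON above
             * asserts cannot happen.
             */
            batch->pfns[0] = batch->pfns[batch->end - 1] +
                             (batch->npfns[batch->end - 1] - keep_pfns);
            batch->end = 0;
    }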
@@ -723,7 +727,7 @@ static void pfn_reader_user_destroy(struct pfn_reader_user *user,
                        mmap_read_unlock(pages->source_mm);
                if (pages->source_mm != current->mm)
                        mmput(pages->source_mm);
-               user->locked = 0;
+               user->locked = -1;
        }
 
        kfree(user->upages);
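
Setting user->locked back to -1 (rather than 0) returns the reader to its
initial "no source mm held" state once the mm reference has been dropped. A
rough sketch of pfn_reader_user_destroy() after the fix; the meaning of each
locked value in the comment is inferred from this hunk and is an assumption,
not taken from the patch:

    static void pfn_reader_user_destroy(struct pfn_reader_user *user,
                                        struct iopt_pages *pages)
    {
            /*
             * Assumed convention:
             *   -1  no remote mm held, nothing to unlock or put
             *    0  mm held, but the mmap read lock already dropped
             *    1  mmap read lock currently held
             */
            if (user->locked != -1) {
                    if (user->locked)
                            mmap_read_unlock(pages->source_mm);
                    if (pages->source_mm != current->mm)
                            mmput(pages->source_mm);
                    /* Fully torn down again, not merely "unlocked" */
                    user->locked = -1;
            }

            kfree(user->upages);
            /* remaining cleanup elided */
    }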
@@ -810,7 +814,6 @@ static int incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages)
 
        lock_limit = task_rlimit(pages->source_task, RLIMIT_MEMLOCK) >>
                     PAGE_SHIFT;
-       npages = pages->npinned - pages->last_npinned;
        do {
                cur_pages = atomic_long_read(&pages->source_user->locked_vm);
                new_pages = cur_pages + npages;
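
With the recalculation removed, incr_user_locked_vm() simply charges the
npages it was passed against the task's RLIMIT_MEMLOCK. A sketch of the full
accounting loop, assuming it finishes with the usual atomic read/
compare-and-swap retry (the tail of the loop is outside the hunk):

    static int incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages)
    {
            unsigned long lock_limit;
            unsigned long cur_pages;
            unsigned long new_pages;

            lock_limit = task_rlimit(pages->source_task, RLIMIT_MEMLOCK) >>
                         PAGE_SHIFT;
            /* npages already holds the number of new pins to account */
            do {
                    cur_pages = atomic_long_read(&pages->source_user->locked_vm);
                    new_pages = cur_pages + npages;
                    if (new_pages > lock_limit)
                            return -ENOMEM;
            } while (atomic_long_cmpxchg(&pages->source_user->locked_vm,
                                         cur_pages, new_pages) != cur_pages);
            return 0;
    }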