iommufd: Add additional invariant assertions
author	Jason Gunthorpe <jgg@nvidia.com>	Tue, 29 Nov 2022 20:29:41 +0000 (16:29 -0400)
committer	Jason Gunthorpe <jgg@nvidia.com>	Thu, 1 Dec 2022 00:16:49 +0000 (20:16 -0400)
These are on performance paths, so we guard them with CONFIG_IOMMUFD_TEST
to avoid taking a hit during normal operation.

These are useful when running the test suite and syzkaller to find data
structure inconsistencies early.
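
Background (editor's sketch, not part of the patch): IS_ENABLED() expands to
a compile-time constant 0 or 1, so when CONFIG_IOMMUFD_TEST=n the && guard
short-circuits and the compiler discards the WARN_ON() branch entirely; the
assertions cost nothing in production builds. A minimal standalone C
illustration of the pattern, using hypothetical MY_CONFIG_TEST and
MY_WARN_ON() stand-ins for the kernel macros:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's IS_ENABLED()/WARN_ON(). */
#define MY_CONFIG_TEST 0	/* flip to 1 to compile the checks in */

static int my_warn_on(int cond, const char *expr)
{
	if (cond)
		fprintf(stderr, "WARNING: %s\n", expr);
	return cond;
}
#define MY_WARN_ON(x) my_warn_on(!!(x), #x)

/*
 * Same shape as the hunks below: a constant guard in front of the
 * assertion lets the compiler delete the whole branch when it is 0.
 */
static int pin_pages(unsigned long iova_alignment, unsigned long page_size)
{
	if (MY_CONFIG_TEST && MY_WARN_ON(iova_alignment != page_size))
		return -22;	/* -EINVAL */
	return 0;
}

int main(void)
{
	return pin_pages(4096, 4096);
}

Flipping MY_CONFIG_TEST to 1 compiles the check back in, which is what
selecting CONFIG_IOMMUFD_TEST does for the assertions in this patch.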

Link: https://lore.kernel.org/r/18-v6-a196d26f289e+11787-iommufd_jgg@nvidia.com
Tested-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com> # s390
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/iommu/iommufd/device.c
drivers/iommu/iommufd/io_pagetable.c
drivers/iommu/iommufd/io_pagetable.h
drivers/iommu/iommufd/pages.c

diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 67ce36152e8ab233ca0eb6e24a3a868335e50fa6..dd2a415b603e3b2fa6fa90832b2826cb066298c2 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -625,6 +625,11 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
        struct iopt_area *area;
        int rc;
 
+       /* Driver's ops don't support pin_pages */
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+           WARN_ON(access->iova_alignment != PAGE_SIZE || !access->ops->unmap))
+               return -EINVAL;
+
        if (!length)
                return -EINVAL;
        if (check_add_overflow(iova, length - 1, &last_iova))
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 4f4a9d9aac570efdcdd597e6944583936c272bfe..3467cea795684c9f203f519ee7db7f88b59b4d47 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -251,6 +251,11 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
                        (uintptr_t)elm->pages->uptr + elm->start_byte, length);
                if (rc)
                        goto out_unlock;
+               if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+                   WARN_ON(iopt_check_iova(iopt, *dst_iova, length))) {
+                       rc = -EINVAL;
+                       goto out_unlock;
+               }
        } else {
                rc = iopt_check_iova(iopt, *dst_iova, length);
                if (rc)
@@ -277,6 +282,8 @@ out_unlock:
 
 static void iopt_abort_area(struct iopt_area *area)
 {
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+               WARN_ON(area->pages);
        if (area->iopt) {
                down_write(&area->iopt->iova_rwsem);
                interval_tree_remove(&area->node, &area->iopt->area_itree);
@@ -642,6 +649,9 @@ void iopt_destroy_table(struct io_pagetable *iopt)
 {
        struct interval_tree_node *node;
 
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+               iopt_remove_reserved_iova(iopt, NULL);
+
        while ((node = interval_tree_iter_first(&iopt->allowed_itree, 0,
                                                ULONG_MAX))) {
                interval_tree_remove(node, &iopt->allowed_itree);
@@ -688,6 +698,8 @@ static void iopt_unfill_domain(struct io_pagetable *iopt,
                                continue;
 
                        mutex_lock(&pages->mutex);
+                       if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+                               WARN_ON(!area->storage_domain);
                        if (area->storage_domain == domain)
                                area->storage_domain = storage_domain;
                        mutex_unlock(&pages->mutex);
@@ -792,6 +804,16 @@ static int iopt_check_iova_alignment(struct io_pagetable *iopt,
                    (iopt_area_length(area) & align_mask) ||
                    (area->page_offset & align_mask))
                        return -EADDRINUSE;
+
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) {
+               struct iommufd_access *access;
+               unsigned long index;
+
+               xa_for_each(&iopt->access_list, index, access)
+                       if (WARN_ON(access->iova_alignment >
+                                   new_iova_alignment))
+                               return -EADDRINUSE;
+       }
        return 0;
 }
 
diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h
index 2ee6942c3ef4a59befd68d2580b256abf3b5016a..83e7c175f2a277593a3d272366e7a1c3bb9f1051 100644
--- a/drivers/iommu/iommufd/io_pagetable.h
+++ b/drivers/iommu/iommufd/io_pagetable.h
@@ -101,6 +101,9 @@ static inline size_t iopt_area_length(struct iopt_area *area)
 static inline unsigned long iopt_area_start_byte(struct iopt_area *area,
                                                 unsigned long iova)
 {
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+               WARN_ON(iova < iopt_area_iova(area) ||
+                       iova > iopt_area_last_iova(area));
        return (iova - iopt_area_iova(area)) + area->page_offset +
               iopt_area_index(area) * PAGE_SIZE;
 }
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index c5d2d9a8c562032617dd5c7194a42989e06d22a1..429fa3b0a239cdc6160c01f516f22f1b49faf41c 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -162,12 +162,20 @@ void interval_tree_double_span_iter_next(
 
 static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)
 {
-       pages->npinned += npages;
+       int rc;
+
+       rc = check_add_overflow(pages->npinned, npages, &pages->npinned);
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+               WARN_ON(rc || pages->npinned > pages->npages);
 }
 
 static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages)
 {
-       pages->npinned -= npages;
+       int rc;
+
+       rc = check_sub_overflow(pages->npinned, npages, &pages->npinned);
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+               WARN_ON(rc || pages->npinned > pages->npages);
 }
 
 static void iopt_pages_err_unpin(struct iopt_pages *pages,
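
Aside (editor's note, not part of the patch): check_add_overflow() and
check_sub_overflow() come from <linux/overflow.h> and return true when the
operation overflows, storing the wrapped result through the third argument;
the hunk above warns on that return value as well as on the npinned > npages
invariant. A small userspace sketch of the same idiom, using the GCC/Clang
builtins that the kernel helpers wrap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	size_t npinned = SIZE_MAX;	/* adding anything must overflow */
	size_t out;

	/* Returns true on overflow, like check_add_overflow(). */
	if (__builtin_add_overflow(npinned, 1, &out))
		fprintf(stderr, "add overflowed (result wrapped to %zu)\n", out);

	npinned = 0;			/* subtracting must underflow */
	if (__builtin_sub_overflow(npinned, 1, &out))
		fprintf(stderr, "sub underflowed\n");

	return 0;
}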
@@ -189,6 +197,9 @@ static void iopt_pages_err_unpin(struct iopt_pages *pages,
 static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
                                             unsigned long index)
 {
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+               WARN_ON(index < iopt_area_index(area) ||
+                       index > iopt_area_last_index(area));
        index -= iopt_area_index(area);
        if (index == 0)
                return iopt_area_iova(area);
@@ -198,6 +209,9 @@ static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
 static unsigned long iopt_area_index_to_iova_last(struct iopt_area *area,
                                                  unsigned long index)
 {
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+               WARN_ON(index < iopt_area_index(area) ||
+                       index > iopt_area_last_index(area));
        if (index == iopt_area_last_index(area))
                return iopt_area_last_iova(area);
        return iopt_area_iova(area) - area->page_offset +
@@ -286,6 +300,8 @@ static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns)
 {
        if (!batch->total_pfns)
                return;
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+               WARN_ON(batch->total_pfns != batch->npfns[0]);
        skip_pfns = min(batch->total_pfns, skip_pfns);
        batch->pfns[0] += skip_pfns;
        batch->npfns[0] -= skip_pfns;
@@ -301,6 +317,8 @@ static int __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup,
        batch->pfns = temp_kmalloc(&size, backup, backup_len);
        if (!batch->pfns)
                return -ENOMEM;
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(size < elmsz))
+               return -EINVAL;
        batch->array_size = size / elmsz;
        batch->npfns = (u32 *)(batch->pfns + batch->array_size);
        batch_clear(batch);
@@ -429,6 +447,10 @@ static int batch_iommu_map_small(struct iommu_domain *domain,
        unsigned long start_iova = iova;
        int rc;
 
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+               WARN_ON(paddr % PAGE_SIZE || iova % PAGE_SIZE ||
+                       size % PAGE_SIZE);
+
        while (size) {
                rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
                if (rc)
@@ -718,6 +740,10 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user,
        uintptr_t uptr;
        long rc;
 
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+           WARN_ON(last_index < start_index))
+               return -EINVAL;
+
        if (!user->upages) {
                /* All undone in pfn_reader_destroy() */
                user->upages_len =
@@ -956,6 +982,10 @@ static int pfn_reader_fill_span(struct pfn_reader *pfns)
        struct iopt_area *area;
        int rc;
 
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+           WARN_ON(span->last_used < start_index))
+               return -EINVAL;
+
        if (span->is_used == 1) {
                batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns,
                                  start_index, span->last_used);
@@ -1008,6 +1038,10 @@ static int pfn_reader_next(struct pfn_reader *pfns)
        while (pfns->batch_end_index != pfns->last_index + 1) {
                unsigned int npfns = pfns->batch.total_pfns;
 
+               if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+                   WARN_ON(interval_tree_double_span_iter_done(&pfns->span)))
+                       return -EINVAL;
+
                rc = pfn_reader_fill_span(pfns);
                if (rc)
                        return rc;
@@ -1091,6 +1125,10 @@ static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages,
 {
        int rc;
 
+       if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+           WARN_ON(last_index < start_index))
+               return -EINVAL;
+
        rc = pfn_reader_init(pfns, pages, start_index, last_index);
        if (rc)
                return rc;