media: atomisp: remove hmm_page_object
author Hans de Goede <hdegoede@redhat.com>
Wed, 15 Jun 2022 20:50:29 +0000 (21:50 +0100)
committer Mauro Carvalho Chehab <mchehab@kernel.org>
Fri, 8 Jul 2022 15:38:07 +0000 (16:38 +0100)
hmm_page_object only stores a struct page pointer, so we can just use
the hmm_bo.pages page pointer array everywhere.
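
For illustration (a minimal sketch, not part of the commit): the change turns
every bo->page_obj[idx].page dereference into a direct bo->pages[idx] array
access. A hypothetical helper built on the fields from hmm_bo.h:

    #include <linux/highmem.h>

    /*
     * Hypothetical helper, for illustration only: map one page of a
     * buffer object with kmap(), as the hmm.c hunks below do inline.
     */
    static void *hmm_bo_kmap_page(struct hmm_buffer_object *bo,
				  unsigned int idx)
    {
	/* Before this patch: return kmap(bo->page_obj[idx].page); */
	return kmap(bo->pages[idx]);
    }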

Link: https://lore.kernel.org/linux-media/20220615205037.16549-33-hdegoede@redhat.com
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
drivers/staging/media/atomisp/include/hmm/hmm_bo.h
drivers/staging/media/atomisp/pci/hmm/hmm.c
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
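
Note on ownership (editorial sketch, not part of the commit): with page_obj
gone, bo->pages is allocated once in hmm_bo_alloc_pages() for both private and
user buffers and freed in one place in hmm_bo_free_pages(), replacing the
per-path kmalloc_array()/kfree() pairs removed from alloc_private_pages(),
alloc_user_pages() and free_user_pages(). Simplified, with hypothetical
function names:

    #include <linux/slab.h>

    /* Hypothetical sketch of the single-owner model for bo->pages. */
    static int example_pages_alloc(struct hmm_buffer_object *bo)
    {
	bo->pages = kmalloc_array(bo->pgnr, sizeof(struct page *),
				  GFP_KERNEL);
	return bo->pages ? 0 : -ENOMEM;
    }

    static void example_pages_free(struct hmm_buffer_object *bo)
    {
	kfree(bo->pages);	/* one kfree() covers both buffer types */
	bo->pages = NULL;
    }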

drivers/staging/media/atomisp/include/hmm/hmm_bo.h
index a4b193c35127b298eb6d40b9acac01fc99da4db5..385e22fc4a46abe63659395e1f8977ab36c4a2d5 100644
@@ -114,10 +114,6 @@ struct hmm_bo_device {
        struct kmem_cache *bo_cache;
 };
 
-struct hmm_page_object {
-       struct page             *page;
-};
-
 struct hmm_buffer_object {
        struct hmm_bo_device    *bdev;
        struct list_head        list;
@@ -128,7 +124,6 @@ struct hmm_buffer_object {
        /* mutex protecting this BO */
        struct mutex            mutex;
        enum hmm_bo_type        type;
-       struct hmm_page_object  *page_obj;      /* physical pages */
        int             mmap_count;
        int             status;
        int             mem_type;
drivers/staging/media/atomisp/pci/hmm/hmm.c
index 84a52514df16456e7d162e381531000c6713b12a..7f0fac779fd5dc7991e865f211fc9dd1cf4447d9 100644
@@ -295,7 +295,7 @@ static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
                idx = (virt - bo->start) >> PAGE_SHIFT;
                offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
-               src = (char *)kmap(bo->page_obj[idx].page) + offset;
+               src = (char *)kmap(bo->pages[idx]) + offset;
 
                if ((bytes + offset) >= PAGE_SIZE) {
                        len = PAGE_SIZE - offset;
@@ -314,7 +314,7 @@ static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
 
                clflush_cache_range(src, len);
 
-               kunmap(bo->page_obj[idx].page);
+               kunmap(bo->pages[idx]);
        }
 
        return 0;
@@ -428,9 +428,9 @@ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
                offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
                if (in_atomic())
-                       des = (char *)kmap_atomic(bo->page_obj[idx].page);
+                       des = (char *)kmap_atomic(bo->pages[idx]);
                else
-                       des = (char *)kmap(bo->page_obj[idx].page);
+                       des = (char *)kmap(bo->pages[idx]);
 
                if (!des) {
                        dev_err(atomisp_dev,
@@ -464,7 +464,7 @@ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
                         */
                        kunmap_atomic(des - offset);
                else
-                       kunmap(bo->page_obj[idx].page);
+                       kunmap(bo->pages[idx]);
        }
 
        return 0;
@@ -508,7 +508,7 @@ int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
                idx = (virt - bo->start) >> PAGE_SHIFT;
                offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
-               des = (char *)kmap(bo->page_obj[idx].page) + offset;
+               des = (char *)kmap(bo->pages[idx]) + offset;
 
                if ((bytes + offset) >= PAGE_SIZE) {
                        len = PAGE_SIZE - offset;
@@ -524,7 +524,7 @@ int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
 
                clflush_cache_range(des, len);
 
-               kunmap(bo->page_obj[idx].page);
+               kunmap(bo->pages[idx]);
        }
 
        return 0;
@@ -547,7 +547,7 @@ phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
        idx = (virt - bo->start) >> PAGE_SHIFT;
        offset = (virt - bo->start) - (idx << PAGE_SHIFT);
 
-       return page_to_phys(bo->page_obj[idx].page) + offset;
+       return page_to_phys(bo->pages[idx]) + offset;
 }
 
 int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index e5796ab7dbcfd2648882f22ba333e6c830b0e955..f50494123f0393ed3e667647035af31e7a436880 100644
@@ -631,7 +631,7 @@ static void free_private_bo_pages(struct hmm_buffer_object *bo,
        int i, ret;
 
        for (i = 0; i < free_pgnr; i++) {
-               ret = set_pages_wb(bo->page_obj[i].page, 1);
+               ret = set_pages_wb(bo->pages[i], 1);
                if (ret)
                        dev_err(atomisp_dev,
                                "set page to WB err ...ret = %d\n",
@@ -644,7 +644,7 @@ static void free_private_bo_pages(struct hmm_buffer_object *bo,
                address be valid,it maybe memory corruption by lowmemory
                */
                if (!ret) {
-                       __free_pages(bo->page_obj[i].page, 0);
+                       __free_pages(bo->pages[i], 0);
                }
        }
 }
@@ -663,11 +663,6 @@ static int alloc_private_pages(struct hmm_buffer_object *bo)
 
        pgnr = bo->pgnr;
 
-       bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
-                                    GFP_KERNEL);
-       if (unlikely(!bo->page_obj))
-               return -ENOMEM;
-
        i = 0;
        alloc_pgnr = 0;
 
@@ -739,7 +734,7 @@ retry:
                        }
 
                        for (j = 0; j < blk_pgnr; j++, i++) {
-                               bo->page_obj[i].page = pages + j;
+                               bo->pages[i] = pages + j;
                        }
 
                        pgnr -= blk_pgnr;
@@ -759,18 +754,9 @@ retry:
 cleanup:
        alloc_pgnr = i;
        free_private_bo_pages(bo, alloc_pgnr);
-
-       kfree(bo->page_obj);
-
        return -ENOMEM;
 }
 
-static void free_private_pages(struct hmm_buffer_object *bo)
-{
-       free_private_bo_pages(bo, bo->pgnr);
-       kfree(bo->page_obj);
-}
-
 static void free_user_pages(struct hmm_buffer_object *bo,
                            unsigned int page_nr)
 {
@@ -782,8 +768,6 @@ static void free_user_pages(struct hmm_buffer_object *bo,
                for (i = 0; i < page_nr; i++)
                        put_page(bo->pages[i]);
        }
-       kfree(bo->pages);
-       kfree(bo->page_obj);
 }
 
 /*
@@ -793,20 +777,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
                            const void __user *userptr)
 {
        int page_nr;
-       int i;
        struct vm_area_struct *vma;
-       struct page **pages;
-
-       pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
-       if (unlikely(!pages))
-               return -ENOMEM;
-
-       bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
-                                    GFP_KERNEL);
-       if (unlikely(!bo->page_obj)) {
-               kfree(pages);
-               return -ENOMEM;
-       }
 
        mutex_unlock(&bo->mutex);
        mmap_read_lock(current->mm);
@@ -814,8 +785,6 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
        mmap_read_unlock(current->mm);
        if (!vma) {
                dev_err(atomisp_dev, "find_vma failed\n");
-               kfree(bo->page_obj);
-               kfree(pages);
                mutex_lock(&bo->mutex);
                return -EFAULT;
        }
@@ -827,18 +796,16 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
 
        userptr = untagged_addr(userptr);
 
-       bo->pages = pages;
-
        if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
                page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr,
                                         FOLL_LONGTERM | FOLL_WRITE,
-                                        pages, NULL);
+                                        bo->pages, NULL);
                bo->mem_type = HMM_BO_MEM_TYPE_PFN;
        } else {
                /*Handle frame buffer allocated in user space*/
                mutex_unlock(&bo->mutex);
                page_nr = get_user_pages_fast((unsigned long)userptr,
-                                             (int)(bo->pgnr), 1, pages);
+                                             (int)(bo->pgnr), 1, bo->pages);
                mutex_lock(&bo->mutex);
                bo->mem_type = HMM_BO_MEM_TYPE_USER;
        }
@@ -858,10 +825,6 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
                goto out_of_mem;
        }
 
-       for (i = 0; i < bo->pgnr; i++) {
-               bo->page_obj[i].page = pages[i];
-       }
-
        return 0;
 
 out_of_mem:
@@ -891,6 +854,12 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
        mutex_lock(&bo->mutex);
        check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
 
+       bo->pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
+       if (unlikely(!bo->pages)) {
+               ret = -ENOMEM;
+               goto alloc_err;
+       }
+
        /*
         * TO DO:
         * add HMM_BO_USER type
@@ -915,6 +884,7 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
        return 0;
 
 alloc_err:
+       kfree(bo->pages);
        mutex_unlock(&bo->mutex);
        dev_err(atomisp_dev, "alloc pages err...\n");
        return ret;
@@ -940,11 +910,13 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
        bo->status &= (~HMM_BO_PAGE_ALLOCED);
 
        if (bo->type == HMM_BO_PRIVATE)
-               free_private_pages(bo);
+               free_private_bo_pages(bo, bo->pgnr);
        else if (bo->type == HMM_BO_USER)
                free_user_pages(bo, bo->pgnr);
        else
                dev_err(atomisp_dev, "invalid buffer type.\n");
+
+       kfree(bo->pages);
        mutex_unlock(&bo->mutex);
 
        return;
@@ -989,7 +961,7 @@ int hmm_bo_bind(struct hmm_buffer_object *bo)
        for (i = 0; i < bo->pgnr; i++) {
                ret =
                    isp_mmu_map(&bdev->mmu, virt,
-                               page_to_phys(bo->page_obj[i].page), 1);
+                               page_to_phys(bo->pages[i]), 1);
                if (ret)
                        goto map_err;
                virt += (1 << PAGE_SHIFT);
@@ -1103,9 +1075,6 @@ int hmm_bo_binded(struct hmm_buffer_object *bo)
 
 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
 {
-       struct page **pages;
-       int i;
-
        check_bo_null_return(bo, NULL);
 
        mutex_lock(&bo->mutex);
@@ -1122,27 +1091,15 @@ void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
                bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
        }
 
-       pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
-       if (unlikely(!pages)) {
-               mutex_unlock(&bo->mutex);
-               return NULL;
-       }
-
-       for (i = 0; i < bo->pgnr; i++)
-               pages[i] = bo->page_obj[i].page;
-
-       bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
+       bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP,
                             cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
        if (unlikely(!bo->vmap_addr)) {
-               kfree(pages);
                mutex_unlock(&bo->mutex);
                dev_err(atomisp_dev, "vmap failed...\n");
                return NULL;
        }
        bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
 
-       kfree(pages);
-
        mutex_unlock(&bo->mutex);
        return bo->vmap_addr;
 }
@@ -1272,7 +1229,7 @@ int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
 
        virt = vma->vm_start;
        for (i = 0; i < pgnr; i++) {
-               pfn = page_to_pfn(bo->page_obj[i].page);
+               pfn = page_to_pfn(bo->pages[i]);
                if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
                        dev_warn(atomisp_dev,
                                 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",