Merge branch 'odp_fixes' into hmm.git
author Jason Gunthorpe <jgg@mellanox.com>
Wed, 21 Aug 2019 17:12:29 +0000 (14:12 -0300)
committer Jason Gunthorpe <jgg@mellanox.com>
Wed, 21 Aug 2019 23:58:18 +0000 (20:58 -0300)
From rdma.git

Jason Gunthorpe says:

====================
This is a collection of general cleanups for ODP to clarify some of the
flows around umem creation and use of the interval tree.
====================

The branch is based on v5.3-rc5 due to dependencies, and is being taken
into hmm.git because the next patches there depend on it.

* odp_fixes:
  RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr
  RDMA/mlx5: Use ib_umem_start instead of umem.address
  RDMA/core: Make invalidate_range a device operation
  RDMA/odp: Use kvcalloc for the dma_list and page_list
  RDMA/odp: Check for overflow when computing the umem_odp end
  RDMA/odp: Provide ib_umem_odp_release() to undo the allocs
  RDMA/odp: Split creating a umem_odp from ib_umem_get
  RDMA/odp: Make the three ways to create a umem_odp clear
  RMDA/odp: Consolidate umem_odp initialization
  RDMA/odp: Make it clearer when a umem is an implicit ODP umem
  RDMA/odp: Iterate over the whole rbtree directly
  RDMA/odp: Use the common interval tree library instead of generic
  RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
include/linux/mm_types.h
kernel/fork.c
mm/memremap.c
mm/migrate.c

diff --cc drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
Simple merge
diff --cc include/linux/mm_types.h
Simple merge
diff --cc kernel/fork.c
Simple merge
diff --cc mm/memremap.c
index 0000000000000000000000000000000000000000,ed70c4e8e52a505f431fdeefaf209694a4b03093..32c79b51af8678bead9702a1447b7def15050ad9
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,435 +1,456 @@@
 -static void devmap_managed_enable_put(void *data)
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /* Copyright(c) 2015 Intel Corporation. All rights reserved. */
+ #include <linux/device.h>
+ #include <linux/io.h>
+ #include <linux/kasan.h>
+ #include <linux/memory_hotplug.h>
+ #include <linux/mm.h>
+ #include <linux/pfn_t.h>
+ #include <linux/swap.h>
+ #include <linux/swapops.h>
+ #include <linux/types.h>
+ #include <linux/wait_bit.h>
+ #include <linux/xarray.h>
+ static DEFINE_XARRAY(pgmap_array);
+ #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
+ #define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
+ #ifdef CONFIG_DEV_PAGEMAP_OPS
+ DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
+ EXPORT_SYMBOL(devmap_managed_key);
+ static atomic_t devmap_managed_enable;
 -static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
++static void devmap_managed_enable_put(void)
+ {
+       if (atomic_dec_and_test(&devmap_managed_enable))
+               static_branch_disable(&devmap_managed_key);
+ }
 -      return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
++static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
+ {
+       if (!pgmap->ops || !pgmap->ops->page_free) {
+               WARN(1, "Missing page_free method\n");
+               return -EINVAL;
+       }
+       if (atomic_inc_return(&devmap_managed_enable) == 1)
+               static_branch_enable(&devmap_managed_key);
 -static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
++      return 0;
+ }
+ #else
 -static void devm_memremap_pages_release(void *data)
++static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
+ {
+       return -EINVAL;
+ }
++static void devmap_managed_enable_put(void)
++{
++}
+ #endif /* CONFIG_DEV_PAGEMAP_OPS */
+ static void pgmap_array_delete(struct resource *res)
+ {
+       xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
+                       NULL, GFP_KERNEL);
+       synchronize_rcu();
+ }
+ static unsigned long pfn_first(struct dev_pagemap *pgmap)
+ {
+       return PHYS_PFN(pgmap->res.start) +
+               vmem_altmap_offset(pgmap_altmap(pgmap));
+ }
+ static unsigned long pfn_end(struct dev_pagemap *pgmap)
+ {
+       const struct resource *res = &pgmap->res;
+       return (res->start + resource_size(res)) >> PAGE_SHIFT;
+ }
+ static unsigned long pfn_next(unsigned long pfn)
+ {
+       if (pfn % 1024 == 0)
+               cond_resched();
+       return pfn + 1;
+ }
+ #define for_each_device_pfn(pfn, map) \
+       for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
+ static void dev_pagemap_kill(struct dev_pagemap *pgmap)
+ {
+       if (pgmap->ops && pgmap->ops->kill)
+               pgmap->ops->kill(pgmap);
+       else
+               percpu_ref_kill(pgmap->ref);
+ }
+ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
+ {
+       if (pgmap->ops && pgmap->ops->cleanup) {
+               pgmap->ops->cleanup(pgmap);
+       } else {
+               wait_for_completion(&pgmap->done);
+               percpu_ref_exit(pgmap->ref);
+       }
+       /*
+        * Undo the pgmap ref assignment for the internal case as the
+        * caller may re-enable the same pgmap.
+        */
+       if (pgmap->ref == &pgmap->internal_ref)
+               pgmap->ref = NULL;
+ }
 -      struct dev_pagemap *pgmap = data;
 -      struct device *dev = pgmap->dev;
++void memunmap_pages(struct dev_pagemap *pgmap)
+ {
 -      dev_WARN_ONCE(dev, pgmap->altmap.alloc,
 -                    "%s: failed to free all reserved pages\n", __func__);
+       struct resource *res = &pgmap->res;
+       unsigned long pfn;
+       int nid;
+       dev_pagemap_kill(pgmap);
+       for_each_device_pfn(pfn, pgmap)
+               put_page(pfn_to_page(pfn));
+       dev_pagemap_cleanup(pgmap);
+       /* pages are dead and unused, undo the arch mapping */
+       nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));
+       mem_hotplug_begin();
+       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+               pfn = PHYS_PFN(res->start);
+               __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
+                                PHYS_PFN(resource_size(res)), NULL);
+       } else {
+               arch_remove_memory(nid, res->start, resource_size(res),
+                               pgmap_altmap(pgmap));
+               kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+       }
+       mem_hotplug_done();
+       untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
+       pgmap_array_delete(res);
 -/**
 - * devm_memremap_pages - remap and provide memmap backing for the given resource
 - * @dev: hosting device for @res
 - * @pgmap: pointer to a struct dev_pagemap
 - *
 - * Notes:
 - * 1/ At a minimum the res and type members of @pgmap must be initialized
 - *    by the caller before passing it to this function
 - *
 - * 2/ The altmap field may optionally be initialized, in which case
 - *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 - *
 - * 3/ The ref field may optionally be provided, in which pgmap->ref must be
 - *    'live' on entry and will be killed and reaped at
 - *    devm_memremap_pages_release() time, or if this routine fails.
 - *
 - * 4/ res is expected to be a host memory range that could feasibly be
 - *    treated as a "System RAM" range, i.e. not a device mmio range, but
 - *    this is not enforced.
++      WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
++      devmap_managed_enable_put();
++}
++EXPORT_SYMBOL_GPL(memunmap_pages);
++
++static void devm_memremap_pages_release(void *data)
++{
++      memunmap_pages(data);
+ }
+ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
+ {
+       struct dev_pagemap *pgmap =
+               container_of(ref, struct dev_pagemap, internal_ref);
+       complete(&pgmap->done);
+ }
 -void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
++/*
++ * Not device managed version of dev_memremap_pages, undone by
++ * memunmap_pages().  Please use dev_memremap_pages if you have a struct
++ * device available.
+  */
 -      int error, nid, is_ram;
++void *memremap_pages(struct dev_pagemap *pgmap, int nid)
+ {
+       struct resource *res = &pgmap->res;
+       struct dev_pagemap *conflict_pgmap;
+       struct mhp_restrictions restrictions = {
+               /*
+                * We do not want any optional features only our own memmap
+                */
+               .altmap = pgmap_altmap(pgmap),
+       };
+       pgprot_t pgprot = PAGE_KERNEL;
 -              error = devmap_managed_enable_get(dev, pgmap);
++      int error, is_ram;
+       bool need_devmap_managed = true;
+       switch (pgmap->type) {
+       case MEMORY_DEVICE_PRIVATE:
+               if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
+                       WARN(1, "Device private memory not supported\n");
+                       return ERR_PTR(-EINVAL);
+               }
+               if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
+                       WARN(1, "Missing migrate_to_ram method\n");
+                       return ERR_PTR(-EINVAL);
+               }
+               break;
+       case MEMORY_DEVICE_FS_DAX:
+               if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
+                   IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
+                       WARN(1, "File system DAX not supported\n");
+                       return ERR_PTR(-EINVAL);
+               }
+               break;
+       case MEMORY_DEVICE_DEVDAX:
+       case MEMORY_DEVICE_PCI_P2PDMA:
+               need_devmap_managed = false;
+               break;
+       default:
+               WARN(1, "Invalid pgmap type %d\n", pgmap->type);
+               break;
+       }
+       if (!pgmap->ref) {
+               if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
+                       return ERR_PTR(-EINVAL);
+               init_completion(&pgmap->done);
+               error = percpu_ref_init(&pgmap->internal_ref,
+                               dev_pagemap_percpu_release, 0, GFP_KERNEL);
+               if (error)
+                       return ERR_PTR(error);
+               pgmap->ref = &pgmap->internal_ref;
+       } else {
+               if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
+                       WARN(1, "Missing reference count teardown definition\n");
+                       return ERR_PTR(-EINVAL);
+               }
+       }
+       if (need_devmap_managed) {
 -              dev_WARN(dev, "Conflicting mapping in same section\n");
++              error = devmap_managed_enable_get(pgmap);
+               if (error)
+                       return ERR_PTR(error);
+       }
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
+       if (conflict_pgmap) {
 -              dev_WARN(dev, "Conflicting mapping in same section\n");
++              WARN(1, "Conflicting mapping in same section\n");
+               put_dev_pagemap(conflict_pgmap);
+               error = -ENOMEM;
+               goto err_array;
+       }
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
+       if (conflict_pgmap) {
 -      pgmap->dev = dev;
 -
++              WARN(1, "Conflicting mapping in same section\n");
+               put_dev_pagemap(conflict_pgmap);
+               error = -ENOMEM;
+               goto err_array;
+       }
+       is_ram = region_intersects(res->start, resource_size(res),
+               IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
+       if (is_ram != REGION_DISJOINT) {
+               WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
+                               is_ram == REGION_MIXED ? "mixed" : "ram", res);
+               error = -ENXIO;
+               goto err_array;
+       }
 -      nid = dev_to_node(dev);
+       error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
+                               PHYS_PFN(res->end), pgmap, GFP_KERNEL));
+       if (error)
+               goto err_array;
 -
 -      error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
 -                      pgmap);
 -      if (error)
 -              return ERR_PTR(error);
 -
+       if (nid < 0)
+               nid = numa_mem_id();
+       error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
+                       resource_size(res));
+       if (error)
+               goto err_pfn_remap;
+       mem_hotplug_begin();
+       /*
+        * For device private memory we call add_pages() as we only need to
+        * allocate and initialize struct page for the device memory. More-
+        * over the device memory is un-accessible thus we do not want to
+        * create a linear mapping for the memory like arch_add_memory()
+        * would do.
+        *
+        * For all other device memory types, which are accessible by
+        * the CPU, we do want the linear mapping and thus use
+        * arch_add_memory().
+        */
+       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+               error = add_pages(nid, PHYS_PFN(res->start),
+                               PHYS_PFN(resource_size(res)), &restrictions);
+       } else {
+               error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
+               if (error) {
+                       mem_hotplug_done();
+                       goto err_kasan;
+               }
+               error = arch_add_memory(nid, res->start, resource_size(res),
+                                       &restrictions);
+       }
+       if (!error) {
+               struct zone *zone;
+               zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
+               move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
+                               PHYS_PFN(resource_size(res)), restrictions.altmap);
+       }
+       mem_hotplug_done();
+       if (error)
+               goto err_add_memory;
+       /*
+        * Initialization of the pages has been deferred until now in order
+        * to allow us to do the work while not holding the hotplug lock.
+        */
+       memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
+                               PHYS_PFN(res->start),
+                               PHYS_PFN(resource_size(res)), pgmap);
+       percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
+       return __va(res->start);
+  err_add_memory:
+       kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+  err_kasan:
+       untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
+  err_pfn_remap:
+       pgmap_array_delete(res);
+  err_array:
+       dev_pagemap_kill(pgmap);
+       dev_pagemap_cleanup(pgmap);
++      devmap_managed_enable_put();
+       return ERR_PTR(error);
+ }
++EXPORT_SYMBOL_GPL(memremap_pages);
++
++/**
++ * devm_memremap_pages - remap and provide memmap backing for the given resource
++ * @dev: hosting device for @res
++ * @pgmap: pointer to a struct dev_pagemap
++ *
++ * Notes:
++ * 1/ At a minimum the res and type members of @pgmap must be initialized
++ *    by the caller before passing it to this function
++ *
++ * 2/ The altmap field may optionally be initialized, in which case
++ *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
++ *
++ * 3/ The ref field may optionally be provided, in which pgmap->ref must be
++ *    'live' on entry and will be killed and reaped at
++ *    devm_memremap_pages_release() time, or if this routine fails.
++ *
++ * 4/ res is expected to be a host memory range that could feasibly be
++ *    treated as a "System RAM" range, i.e. not a device mmio range, but
++ *    this is not enforced.
++ */
++void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
++{
++      int error;
++      void *ret;
++
++      ret = memremap_pages(pgmap, dev_to_node(dev));
++      if (IS_ERR(ret))
++              return ret;
++
++      error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
++                      pgmap);
++      if (error)
++              return ERR_PTR(error);
++      return ret;
++}
+ EXPORT_SYMBOL_GPL(devm_memremap_pages);
+ void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
+ {
+       devm_release_action(dev, devm_memremap_pages_release, pgmap);
+ }
+ EXPORT_SYMBOL_GPL(devm_memunmap_pages);
+ unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+ {
+       /* number of pfns from base where pfn_to_page() is valid */
+       if (altmap)
+               return altmap->reserve + altmap->free;
+       return 0;
+ }
+ void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
+ {
+       altmap->alloc -= nr_pfns;
+ }
+ /**
+  * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
+  * @pfn: page frame number to lookup page_map
+  * @pgmap: optional known pgmap that already has a reference
+  *
+  * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
+  * is non-NULL but does not cover @pfn the reference to it will be released.
+  */
+ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
+               struct dev_pagemap *pgmap)
+ {
+       resource_size_t phys = PFN_PHYS(pfn);
+       /*
+        * In the cached case we're already holding a live reference.
+        */
+       if (pgmap) {
+               if (phys >= pgmap->res.start && phys <= pgmap->res.end)
+                       return pgmap;
+               put_dev_pagemap(pgmap);
+       }
+       /* fall back to slow path lookup */
+       rcu_read_lock();
+       pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
+       if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
+               pgmap = NULL;
+       rcu_read_unlock();
+       return pgmap;
+ }
+ EXPORT_SYMBOL_GPL(get_dev_pagemap);
+ #ifdef CONFIG_DEV_PAGEMAP_OPS
+ void __put_devmap_managed_page(struct page *page)
+ {
+       int count = page_ref_dec_return(page);
+       /*
+        * If refcount is 1 then page is freed and refcount is stable as nobody
+        * holds a reference on the page.
+        */
+       if (count == 1) {
+               /* Clear Active bit in case of parallel mark_page_accessed */
+               __ClearPageActive(page);
+               __ClearPageWaiters(page);
+               mem_cgroup_uncharge(page);
+               /*
+                * When a device_private page is freed, the page->mapping field
+                * may still contain a (stale) mapping value. For example, the
+                * lower bits of page->mapping may still identify the page as
+                * an anonymous page. Ultimately, this entire field is just
+                * stale and wrong, and it will cause errors if not cleared.
+                * One example is:
+                *
+                *  migrate_vma_pages()
+                *    migrate_vma_insert_page()
+                *      page_add_new_anon_rmap()
+                *        __page_set_anon_rmap()
+                *          ...checks page->mapping, via PageAnon(page) call,
+                *            and incorrectly concludes that the page is an
+                *            anonymous page. Therefore, it incorrectly,
+                *            silently fails to set up the new anon rmap.
+                *
+                * For other types of ZONE_DEVICE pages, migration is either
+                * handled differently or not done at all, so there is no need
+                * to clear page->mapping.
+                */
+               if (is_device_private_page(page))
+                       page->mapping = NULL;
+               page->pgmap->ops->page_free(page);
+       } else if (!count)
+               __put_page(page);
+ }
+ EXPORT_SYMBOL(__put_devmap_managed_page);
+ #endif /* CONFIG_DEV_PAGEMAP_OPS */
diff --cc mm/migrate.c
Simple merge
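
The mm/memremap.c side of this merge splits devm_memremap_pages() into a thin
device-managed wrapper around the newly exported memremap_pages() and
memunmap_pages(), so callers that have no struct device can manage the mapping
themselves. A minimal sketch of such a caller follows, assuming the
declarations are exposed through <linux/memremap.h> as part of this series; the
driver-side names (my_pgmap, my_res, my_map, my_unmap) are illustrative only.

    /*
     * Hedged example, not taken from the merge itself: map device pages
     * without a struct device using the entry points exported above.
     */
    #include <linux/err.h>
    #include <linux/ioport.h>
    #include <linux/memremap.h>
    #include <linux/numa.h>

    static struct dev_pagemap my_pgmap;

    static void *my_map(struct resource *my_res)
    {
            my_pgmap.type = MEMORY_DEVICE_DEVDAX;  /* no page_free required */
            my_pgmap.res = *my_res;
            /* pgmap->ref left NULL: memremap_pages() sets up internal_ref */

            /* returns the kernel VA of my_res->start, or ERR_PTR() on error */
            return memremap_pages(&my_pgmap, NUMA_NO_NODE);
    }

    static void my_unmap(void)
    {
            /* pairs with memremap_pages(); exported by this merge */
            memunmap_pages(&my_pgmap);
    }

Callers that do have a struct device keep using devm_memremap_pages(), which
now simply calls memremap_pages(pgmap, dev_to_node(dev)) and registers
devm_memremap_pages_release() as the devm teardown action.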