if (pm->show_pfn)
                        frame = pte_pfn(pte);
                flags |= PM_PRESENT;
-               page = _vm_normal_page(vma, addr, pte, true);
+               page = vm_normal_page(vma, addr, pte);
                if (pte_soft_dirty(pte))
                        flags |= PM_SOFT_DIRTY;
        } else if (is_swap_pte(pte)) {
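
The flags assembled above are exactly what userspace sees in /proc/<pid>/pagemap.
As a minimal userspace sketch (not part of this patch; bit layout per
Documentation/admin-guide/mm/pagemap.rst), decoding one 64-bit entry:

/* Read and decode the pagemap entry covering one of our own addresses. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t entry;
	long page_size = sysconf(_SC_PAGESIZE);
	uintptr_t addr = (uintptr_t)&entry;	/* probe our own stack page */
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &entry, sizeof(entry),
		  (addr / page_size) * sizeof(entry)) != sizeof(entry))
		return 1;
	printf("present=%d soft-dirty=%d pfn=0x%llx\n",
	       (int)(entry >> 63 & 1),	/* PM_PRESENT */
	       (int)(entry >> 55 & 1),	/* PM_SOFT_DIRTY */
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));	/* 0 unless privileged */
	close(fd);
	return 0;
}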
 
 static inline void hmm_mm_init(struct mm_struct *mm) {}
 #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
 
-#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) ||  IS_ENABLED(CONFIG_DEVICE_PUBLIC)
+#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
 struct hmm_devmem;
 
 struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
 struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
                                  struct device *device,
                                  unsigned long size);
-struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
-                                          struct device *device,
-                                          struct resource *res);
 
 /*
  * hmm_devmem_page_set_drvdata - set per-page driver data field
 {
        return page->hmm_data;
 }
-#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
+#endif /* CONFIG_DEVICE_PRIVATE */
 #else /* IS_ENABLED(CONFIG_HMM) */
 static inline void hmm_mm_destroy(struct mm_struct *mm) {}
 static inline void hmm_mm_init(struct mm_struct *mm) {}
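
As a hedged sketch of how the surviving drvdata accessors pair up in a driver
(struct my_page_state and both helpers are hypothetical):

/* Stash and recover per-page driver state via the hmm_devmem drvdata
 * helpers kept above; the backing field is one unsigned long per page. */
struct my_page_state {
	unsigned long flags;
};

static void my_track_page(struct page *page, struct my_page_state *state)
{
	hmm_devmem_page_set_drvdata(page, (unsigned long)state);
}

static struct my_page_state *my_page_state_of(struct page *page)
{
	return (struct my_page_state *)hmm_devmem_page_get_drvdata(page);
}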
 
        IORES_DESC_PERSISTENT_MEMORY            = 4,
        IORES_DESC_PERSISTENT_MEMORY_LEGACY     = 5,
        IORES_DESC_DEVICE_PRIVATE_MEMORY        = 6,
-       IORES_DESC_DEVICE_PUBLIC_MEMORY         = 7,
 };
 
 /* helpers to define resources */
 
  * A more complete discussion of unaddressable memory may be found in
  * include/linux/hmm.h and Documentation/vm/hmm.rst.
  *
- * MEMORY_DEVICE_PUBLIC:
- * Device memory that is cache coherent from device and CPU point of view. This
- * is use on platform that have an advance system bus (like CAPI or CCIX). A
- * driver can hotplug the device memory using ZONE_DEVICE and with that memory
- * type. Any page of a process can be migrated to such memory. However no one
- * should be allow to pin such memory so that it can always be evicted.
- *
  * MEMORY_DEVICE_FS_DAX:
  * Host memory that has similar access semantics as System RAM i.e. DMA
  * coherent and supports page pinning. In support of coordinating page
  */
 enum memory_type {
        MEMORY_DEVICE_PRIVATE = 1,
-       MEMORY_DEVICE_PUBLIC,
        MEMORY_DEVICE_FS_DAX,
        MEMORY_DEVICE_PCI_P2PDMA,
 };
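
With MEMORY_DEVICE_PUBLIC gone, MEMORY_DEVICE_PRIVATE is the only type left for
driver-managed memory the CPU cannot address. A hedged sketch of tagging a
pagemap accordingly, mirroring the dev_pagemap setup removed from
hmm_devmem_add_resource() later in this patch (the my_* names are illustrative):

/* Hotplug driver memory as device-private; pgmap->ref must point at an
 * already-initialized percpu_ref owned by the caller. */
static void *my_add_private_memory(struct device *dev,
				   struct dev_pagemap *pgmap,
				   struct resource *res,
				   struct percpu_ref *ref)
{
	pgmap->type = MEMORY_DEVICE_PRIVATE;	/* CPU cannot address these pages */
	pgmap->res = *res;
	pgmap->ref = ref;
	pgmap->altmap_valid = false;
	return devm_memremap_pages(dev, pgmap);	/* ERR_PTR() on failure */
}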
 
                return false;
        switch (page->pgmap->type) {
        case MEMORY_DEVICE_PRIVATE:
-       case MEMORY_DEVICE_PUBLIC:
        case MEMORY_DEVICE_FS_DAX:
                __put_devmap_managed_page(page);
                return true;
                page->pgmap->type == MEMORY_DEVICE_PRIVATE;
 }
 
-static inline bool is_device_public_page(const struct page *page)
-{
-       return is_zone_device_page(page) &&
-               page->pgmap->type == MEMORY_DEVICE_PUBLIC;
-}
-
 #ifdef CONFIG_PCI_P2PDMA
 static inline bool is_pci_p2pdma_page(const struct page *page)
 {
        return false;
 }
 
-static inline bool is_device_public_page(const struct page *page)
-{
-       return false;
-}
-
 static inline bool is_pci_p2pdma_page(const struct page *page)
 {
        return false;
        pgoff_t last_index;                     /* Highest page->index to unmap */
 };
 
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-                            pte_t pte, bool with_public_device);
-#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
-
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+                            pte_t pte);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
                                pmd_t pmd);
 
 
          memory; i.e., memory that is only accessible from the device (or
          group of devices). You likely also want to select HMM_MIRROR.
 
-config DEVICE_PUBLIC
-       bool "Addressable device memory (like GPU memory)"
-       depends on ARCH_HAS_HMM
-       select HMM
-       select DEV_PAGEMAP_OPS
-
-       help
-         Allows creation of struct pages to represent addressable device
-         memory; i.e., memory that is accessible from both the device and
-         the CPU
-
 config FRAME_VECTOR
        bool
 
 
                if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
                        goto unmap;
                *page = pte_page(*pte);
-
-               /*
-                * This should never happen (a device public page in the gate
-                * area).
-                */
-               if (is_device_public_page(*page))
-                       goto unmap;
        }
        if (unlikely(!try_get_page(*page))) {
                ret = -ENOMEM;
 
 #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
 
 
-#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) ||  IS_ENABLED(CONFIG_DEVICE_PUBLIC)
+#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
 struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
                                       unsigned long addr)
 {
        return devmem;
 }
 EXPORT_SYMBOL_GPL(hmm_devmem_add);
-
-struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
-                                          struct device *device,
-                                          struct resource *res)
-{
-       struct hmm_devmem *devmem;
-       void *result;
-       int ret;
-
-       if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
-               return ERR_PTR(-EINVAL);
-
-       dev_pagemap_get_ops();
-
-       devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
-       if (!devmem)
-               return ERR_PTR(-ENOMEM);
-
-       init_completion(&devmem->completion);
-       devmem->pfn_first = -1UL;
-       devmem->pfn_last = -1UL;
-       devmem->resource = res;
-       devmem->device = device;
-       devmem->ops = ops;
-
-       ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
-                             0, GFP_KERNEL);
-       if (ret)
-               return ERR_PTR(ret);
-
-       devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
-       devmem->pfn_last = devmem->pfn_first +
-                          (resource_size(devmem->resource) >> PAGE_SHIFT);
-       devmem->page_fault = hmm_devmem_fault;
-
-       devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
-       devmem->pagemap.res = *devmem->resource;
-       devmem->pagemap.page_free = hmm_devmem_free;
-       devmem->pagemap.altmap_valid = false;
-       devmem->pagemap.ref = &devmem->ref;
-       devmem->pagemap.data = devmem;
-       devmem->pagemap.kill = hmm_devmem_ref_kill;
-       devmem->pagemap.cleanup = hmm_devmem_ref_exit;
-
-       result = devm_memremap_pages(devmem->device, &devmem->pagemap);
-       if (IS_ERR(result))
-               return result;
-       return devmem;
-}
-EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
-#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
+#endif /* CONFIG_DEVICE_PRIVATE */
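
With the resource-based variant removed, hmm_devmem_add() is the remaining
registration path. A hedged caller sketch; my_devmem_fault/my_devmem_free stand
in for a driver's hmm_devmem_ops callbacks and MY_DEVMEM_SIZE is illustrative:

static const struct hmm_devmem_ops my_devmem_ops = {
	.fault = my_devmem_fault,	/* CPU faulted on a device-private page */
	.free = my_devmem_free,		/* last reference on a device page dropped */
};

static int my_probe(struct device *dev)
{
	struct hmm_devmem *devmem = hmm_devmem_add(&my_devmem_ops, dev,
						   MY_DEVMEM_SIZE);

	return PTR_ERR_OR_ZERO(devmem);
}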
 
                        continue;
                }
 
-               page = _vm_normal_page(vma, addr, ptent, true);
+               page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;
 
 
 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t ptent)
 {
-       struct page *page = _vm_normal_page(vma, addr, ptent, true);
+       struct page *page = vm_normal_page(vma, addr, ptent);
 
        if (!page || !page_mapped(page))
                return NULL;
  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
  *     target for charge migration. if @target is not NULL, the entry is stored
  *     in target->ent.
- *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE  but page is MEMORY_DEVICE_PUBLIC
- *     or MEMORY_DEVICE_PRIVATE (so ZONE_DEVICE page and thus not on the lru).
+ *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PRIVATE
+ *     (so ZONE_DEVICE page and thus not on the lru).
  *     For now such a page is charged like a regular page would be, as for
  *     all intents and purposes it is just special memory taking the place
  *     of a regular page.
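
An illustrative dispatch on these return values, modeled on the move-charge
walker (my_move_page/my_move_swap are hypothetical helpers):

	union mc_target target;

	switch (get_mctgt_type(vma, addr, ptent, &target)) {
	case MC_TARGET_DEVICE:		/* ZONE_DEVICE page, never on the LRU */
		/* fall through: moved like a regular page */
	case MC_TARGET_PAGE:
		my_move_page(target.page);
		break;
	case MC_TARGET_SWAP:
		my_move_swap(target.ent);
		break;
	default:			/* MC_TARGET_NONE: nothing to move */
		break;
	}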
                 */
                if (page->mem_cgroup == mc.from) {
                        ret = MC_TARGET_PAGE;
-                       if (is_device_private_page(page) ||
-                           is_device_public_page(page))
+                       if (is_device_private_page(page))
                                ret = MC_TARGET_DEVICE;
                        if (target)
                                target->page = page;
        if (ptl) {
                /*
                 * Note there can not be MC_TARGET_DEVICE for now as we do not
-                * support transparent huge page with MEMORY_DEVICE_PUBLIC or
-                * MEMORY_DEVICE_PRIVATE but this might change.
+                * support transparent huge pages with MEMORY_DEVICE_PRIVATE
+                * but this might change.
                 */
                if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
                        mc.precharge += HPAGE_PMD_NR;
 
                goto unlock;
        }
 
-       switch (pgmap->type) {
-       case MEMORY_DEVICE_PRIVATE:
-       case MEMORY_DEVICE_PUBLIC:
+       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                /*
                 * TODO: Handle HMM pages which may need coordination
                 * with device-side memory.
                 */
                goto unlock;
-       default:
-               break;
        }
 
        /*
 
  * PFNMAP mappings in order to support COWable mappings.
  *
  */
-struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
-                            pte_t pte, bool with_public_device)
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+                           pte_t pte)
 {
        unsigned long pfn = pte_pfn(pte);
 
                        return NULL;
                if (is_zero_pfn(pfn))
                        return NULL;
-
-               /*
-                * Device public pages are special pages (they are ZONE_DEVICE
-                * pages but different from persistent memory). They behave
-                * allmost like normal pages. The difference is that they are
-                * not on the lru and thus should never be involve with any-
-                * thing that involve lru manipulation (mlock, numa balancing,
-                * ...).
-                *
-                * This is why we still want to return NULL for such page from
-                * vm_normal_page() so that we do not have to special case all
-                * call site of vm_normal_page().
-                */
-               if (likely(pfn <= highest_memmap_pfn)) {
-                       struct page *page = pfn_to_page(pfn);
-
-                       if (is_device_public_page(page)) {
-                               if (with_public_device)
-                                       return page;
-                               return NULL;
-                       }
-               }
-
                if (pte_devmap(pte))
                        return NULL;
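
Converted call sites now follow one shape; a hedged sketch (example_get_page is
hypothetical, and the caller is assumed to hold the PTE lock):

/* Resolve the struct page behind a present user pte; vm_normal_page()
 * returns NULL for the special cases filtered above (zero page,
 * pte_devmap, raw PFN mappings), which callers simply skip. */
static struct page *example_get_page(struct vm_area_struct *vma,
				     unsigned long addr, pte_t pte)
{
	struct page *page;

	if (!pte_present(pte))
		return NULL;			/* swap/migration entries */

	page = vm_normal_page(vma, addr, pte);
	if (page)
		get_page(page);			/* hold it past the PTE lock */
	return page;
}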
 
                rss[mm_counter(page)]++;
        } else if (pte_devmap(pte)) {
                page = pte_page(pte);
-
-               /*
-                * Cache coherent device memory behave like regular page and
-                * not like persistent memory page. For more informations see
-                * MEMORY_DEVICE_CACHE_COHERENT in memory_hotplug.h
-                */
-               if (is_device_public_page(page)) {
-                       get_page(page);
-                       page_dup_rmap(page, false);
-                       rss[mm_counter(page)]++;
-               }
        }
 
 out_set_pte:
                if (pte_present(ptent)) {
                        struct page *page;
 
-                       page = _vm_normal_page(vma, addr, ptent, true);
+                       page = vm_normal_page(vma, addr, ptent);
                        if (unlikely(details) && page) {
                                /*
                                 * unmap_shared_mapping_pages() wants to
 
                        if (is_device_private_page(new)) {
                                entry = make_device_private_entry(new, pte_write(pte));
                                pte = swp_entry_to_pte(entry);
-                       } else if (is_device_public_page(new)) {
-                               pte = pte_mkdevmap(pte);
                        }
                }
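
The consumer side of the entry created above looks like this hedged sketch
(my_handle_device_fault is hypothetical; the swapops.h helpers are real):

/* Recognize a device-private swap entry when the CPU later faults on it
 * and route the fault to the owning driver. */
static vm_fault_t my_handle_swap_pte(struct vm_fault *vmf, pte_t pte)
{
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (is_device_private_entry(entry)) {
		vmf->page = device_private_entry_to_page(entry);
		return my_handle_device_fault(vmf);	/* driver migrates back */
	}
	return VM_FAULT_SIGBUS;
}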
 
         * ZONE_DEVICE pages.
         */
        expected_count += is_device_private_page(page);
-       expected_count += is_device_public_page(page);
        if (mapping)
                expected_count += hpage_nr_pages(page) + page_has_private(page);
 
                if (!PageMappingFlags(page))
                        page->mapping = NULL;
 
-               if (unlikely(is_zone_device_page(newpage))) {
-                       if (is_device_public_page(newpage))
-                               flush_dcache_page(newpage);
-               } else
+               if (likely(!is_zone_device_page(newpage)))
                        flush_dcache_page(newpage);
 
        }
                                pfn = 0;
                                goto next;
                        }
-                       page = _vm_normal_page(migrate->vma, addr, pte, true);
+                       page = vm_normal_page(migrate->vma, addr, pte);
                        mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
                        mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
                }
                 * FIXME proper solution is to rework migration_entry_wait() so
                 * it does not need to take a reference on page.
                 */
-               if (is_device_private_page(page))
-                       return true;
-
-               /*
-                * Only allow device public page to be migrated and account for
-                * the extra reference count imply by ZONE_DEVICE pages.
-                */
-               if (!is_device_public_page(page))
-                       return false;
-               extra++;
+               return is_device_private_page(page);
        }
 
        /* For file backed page */
 
                        swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
                        entry = swp_entry_to_pte(swp_entry);
-               } else if (is_device_public_page(page)) {
-                       entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
-                       if (vma->vm_flags & VM_WRITE)
-                               entry = pte_mkwrite(pte_mkdirty(entry));
-                       entry = pte_mkdevmap(entry);
                }
        } else {
                entry = mk_pte(page, vma->vm_page_prot);
                                        migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
                                        continue;
                                }
-                       } else if (!is_device_public_page(newpage)) {
+                       } else {
                                /*
                                 * Other types of ZONE_DEVICE page are not
                                 * supported.
 
                if (is_huge_zero_page(page))
                        continue;
 
-               /* Device public page can not be huge page */
-               if (is_device_public_page(page)) {
-                       if (locked_pgdat) {
-                               spin_unlock_irqrestore(&locked_pgdat->lru_lock,
-                                                      flags);
-                               locked_pgdat = NULL;
-                       }
-                       put_devmap_managed_page(page);
-                       continue;
-               }
-
                page = compound_head(page);
                if (!put_page_testzero(page))
                        continue;