{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- struct page *page;
+ struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
int ret;

- /*
- * If we have an altmap then we need to skip over any reserved PFNs
- * when querying the zone.
- */
- page = pfn_to_page(start_pfn);
- if (altmap)
- page += vmem_altmap_offset(altmap);
-
__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
/* Remove htab bolted mappings for this section of memory */
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- struct page *page = pfn_to_page(start_pfn);
- struct zone *zone;
+ struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
+ struct zone *zone = page_zone(page);

- /* With altmap the first mapped page is offset from @start */
- if (altmap)
- page += vmem_altmap_offset(altmap);
- zone = page_zone(page);
__remove_pages(zone, start_pfn, nr_pages, altmap);
kernel_physical_mapping_remove(start, start + size);
}
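Note that both arch hunks above add vmem_altmap_offset(altmap) unconditionally, even when no altmap is in use. That is only correct because vmem_altmap_offset() is taught to tolerate a NULL altmap later in this same patch; a sketch of the resulting equivalence:

	/* once the helper is NULL-safe, the offset for a NULL altmap is 0, */
	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(NULL);
	/* ...so this is identical to plain pfn_to_page(start_pfn) */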
if (offset < reserve)
return -EINVAL;
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
- pgmap->altmap_valid = false;
} else if (nd_pfn->mode == PFN_MODE_PMEM) {
nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res) - offset) / PAGE_SIZE);
memcpy(altmap, &__altmap, sizeof(*altmap));
altmap->free = PHYS_PFN(offset - reserve);
altmap->alloc = 0;
- pgmap->altmap_valid = true;
+ pgmap->flags |= PGMAP_ALTMAP_VALID;
} else
return -ENXIO;
bb_res.start += pmem->data_offset;
} else if (pmem_should_map_pages(dev)) {
memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
- pmem->pgmap.altmap_valid = false;
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
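With the boolean gone, a driver that carries no altmap (like the fsdax branch above) now does nothing at all: pgmap->flags is simply left zeroed, so pgmap_altmap() returns NULL. Only altmap users opt in, as the nd_pfn hunk earlier does. A minimal sketch, assuming a zero-initialized pgmap and a prepared altmap named my_altmap (a placeholder, not from the patch):

	/* no altmap: leave flags at 0; pgmap_altmap() will return NULL */
	pgmap->type = MEMORY_DEVICE_FS_DAX;

	/* altmap user: install the carve-out and announce it */
	memcpy(&pgmap->altmap, &my_altmap, sizeof(pgmap->altmap));
	pgmap->flags |= PGMAP_ALTMAP_VALID;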
vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
};

+#define PGMAP_ALTMAP_VALID (1 << 0)
+
/**
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
* @altmap: pre-allocated/reserved memory for vmemmap allocations
* @dev: host device of the mapping for debug
* @data: private data pointer for page_free()
* @type: memory type: see MEMORY_* in memory_hotplug.h
+ * @flags: PGMAP_* flags to specify detailed behavior
* @ops: method table
*/
struct dev_pagemap {
struct vmem_altmap altmap;
- bool altmap_valid;
struct resource res;
struct percpu_ref *ref;
struct device *dev;
enum memory_type type;
+ unsigned int flags;
u64 pci_p2pdma_bus_offset;
const struct dev_pagemap_ops *ops;
};

+static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
+{
+ if (pgmap->flags & PGMAP_ALTMAP_VALID)
+ return &pgmap->altmap;
+ return NULL;
+}
+
#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
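The accessor centralizes the validity test that every caller used to open-code; most of the remaining hunks are mechanical conversions of this form:

	/* before: each call site decoded the boolean by hand */
	struct vmem_altmap *altmap = pgmap->altmap_valid ? &pgmap->altmap : NULL;

	/* after: one place knows how validity is encoded */
	struct vmem_altmap *altmap = pgmap_altmap(pgmap);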
static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
- const struct resource *res = &pgmap->res;
- struct vmem_altmap *altmap = &pgmap->altmap;
- unsigned long pfn;
-
- pfn = res->start >> PAGE_SHIFT;
- if (pgmap->altmap_valid)
- pfn += vmem_altmap_offset(altmap);
- return pfn;
+ return (pgmap->res.start >> PAGE_SHIFT) +
+ vmem_altmap_offset(pgmap_altmap(pgmap));
}
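As a worked example of the now branch-free pfn_first(), with illustrative values that do not come from the patch:

	/*
	 * res.start = 0x100000000, PAGE_SHIFT = 12 -> base pfn 0x100000
	 * no PGMAP_ALTMAP_VALID:  pfn_first() = 0x100000 + 0 = 0x100000
	 * altmap { .reserve = 2, .free = 1024 }:
	 *	pfn_first() = 0x100000 + 1026 = 0x100402
	 */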
static unsigned long pfn_end(struct dev_pagemap *pgmap)
align_size >> PAGE_SHIFT, NULL);
} else {
arch_remove_memory(nid, align_start, align_size,
- pgmap->altmap_valid ? &pgmap->altmap : NULL);
+ pgmap_altmap(pgmap));
kasan_remove_zero_shadow(__va(align_start), align_size);
}
mem_hotplug_done();
* 1/ At a minimum the res, ref, type and ops members of @pgmap must be
* initialized by the caller before passing it to this function
*
- * 2/ The altmap field may optionally be initialized, in which case altmap_valid
- * must be set to true
+ * 2/ The altmap field may optionally be initialized, in which case
+ * PGMAP_ALTMAP_VALID must be set in pgmap->flags.
*
* 3/ pgmap->ref must be 'live' on entry and will be killed and reaped
* at devm_memremap_pages_release() time, or if this routine fails.
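Taken together, the requirements above amount to the following caller-side pattern, inside a hypothetical probe routine. This is a minimal sketch under assumed names (my_res, my_ref, my_pagemap_ops and my_altmap are placeholders), not code from the patch:

	struct dev_pagemap *pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	void *addr;

	if (!pgmap)
		return -ENOMEM;

	pgmap->res = my_res;                    /* 1/ res, ref, type and ops */
	pgmap->ref = &my_ref;                   /* 3/ must already be live */
	pgmap->type = MEMORY_DEVICE_FS_DAX;
	pgmap->ops = &my_pagemap_ops;

	pgmap->altmap = my_altmap;              /* 2/ only if a carve-out exists... */
	pgmap->flags |= PGMAP_ALTMAP_VALID;     /* ...and then the flag is mandatory */

	addr = devm_memremap_pages(dev, pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);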
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
resource_size_t align_start, align_size, align_end;
- struct vmem_altmap *altmap = pgmap->altmap_valid ?
- &pgmap->altmap : NULL;
struct resource *res = &pgmap->res;
struct dev_pagemap *conflict_pgmap;
struct mhp_restrictions restrictions = {
/*
* We do not want any optional features, only our own memmap
*/
- .altmap = altmap,
+ .altmap = pgmap_altmap(pgmap),
};
pgprot_t pgprot = PAGE_KERNEL;
int error, nid, is_ram;
zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
- align_size >> PAGE_SHIFT, altmap);
+ align_size >> PAGE_SHIFT, pgmap_altmap(pgmap));
}
mem_hotplug_done();
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
/* number of pfns from base where pfn_to_page() is valid */
- return altmap->reserve + altmap->free;
+ if (altmap)
+ return altmap->reserve + altmap->free;
+ return 0;
}
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
devmem->pagemap.res = *devmem->resource;
devmem->pagemap.ops = &hmm_pagemap_ops;
- devmem->pagemap.altmap_valid = false;
devmem->pagemap.ref = &devmem->ref;
result = devm_memremap_pages(devmem->device, &devmem->pagemap);
int sections_to_remove;
/* In the ZONE_DEVICE case device driver owns the memory region */
- if (is_dev_zone(zone)) {
- if (altmap)
- map_offset = vmem_altmap_offset(altmap);
- }
+ if (is_dev_zone(zone))
+ map_offset = vmem_altmap_offset(altmap);
clear_zone_contiguous(zone);
{
unsigned long pfn, end_pfn = start_pfn + size;
struct pglist_data *pgdat = zone->zone_pgdat;
+ struct vmem_altmap *altmap = pgmap_altmap(pgmap);
unsigned long zone_idx = zone_idx(zone);
unsigned long start = jiffies;
int nid = pgdat->node_id;
* of the pages reserved for the memmap, so we can just jump to
* the end of that region and start processing the device pages.
*/
- if (pgmap->altmap_valid) {
- struct vmem_altmap *altmap = &pgmap->altmap;
-
+ if (altmap) {
start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
size = end_pfn - start_pfn;
}
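For orientation, the pfn layout this skip assumes looks roughly as follows (a sketch; how reserve and free are split is up to the driver, cf. the nd_pfn hunk above):

	/*
	 * base_pfn        +reserve            +reserve+free
	 * |-- reserved ---|--- memmap pages ---|--- device data pages ... ---|
	 *                                      ^
	 *                  start_pfn = base_pfn + vmem_altmap_offset(altmap)
	 */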