/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pgmap_array);
/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
        return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
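
/*
 * Illustrative sketch, not part of the original file: a namespace provider
 * could use memremap_compat_align() to reject a layout that cannot later be
 * switched between memremap() and memremap_pages() modes, e.g.:
 *
 *      if (!IS_ALIGNED(start, memremap_compat_align()) ||
 *          !IS_ALIGNED(size, memremap_compat_align()))
 *              return -EINVAL;
 *
 * where "start" and "size" are the hypothetical namespace base and length.
 */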

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_managed_enable;
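
/*
 * devmap_managed_enable_get()/put() keep a usage count of pgmaps that need a
 * ->page_free() callback and flip devmap_managed_key on for the first user
 * and off again after the last one goes away, so the page-free fast paths
 * only pay for the devmap check while such a pgmap is active.
 */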
static void devmap_managed_enable_put(void)
{
        if (atomic_dec_and_test(&devmap_managed_enable))
                static_branch_disable(&devmap_managed_key);
}

static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        if (pgmap->type == MEMORY_DEVICE_PRIVATE &&
            (!pgmap->ops || !pgmap->ops->page_free)) {
                WARN(1, "Missing page_free method\n");
                return -EINVAL;
        }

        if (atomic_inc_return(&devmap_managed_enable) == 1)
                static_branch_enable(&devmap_managed_key);
        return 0;
}
#else
static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
        return -EINVAL;
}
static void devmap_managed_enable_put(void)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
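
/*
 * pgmap_array maps physical pfns to the dev_pagemap covering them. Entries
 * are installed by pagemap_range(), cleared by pgmap_array_delete(), and
 * consulted by get_dev_pagemap() for the slow-path lookup.
 */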
static void pgmap_array_delete(struct range *range)
{
        xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
                       NULL, GFP_KERNEL);
        synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
        struct range *range = &pgmap->ranges[range_id];
        unsigned long pfn = PHYS_PFN(range->start);

        /* only the first range carries the altmap reservation */
        if (range_id)
                return pfn;
        return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
        const struct range *range = &pgmap->ranges[range_id];

        return (range->start + range_len(range)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
        if (pfn % 1024 == 0)
                cond_resched();
        return pfn + 1;
}

#define for_each_device_pfn(pfn, map, i) \
        for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
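
/*
 * Teardown of the pgmap reference count is two-phase: dev_pagemap_kill()
 * stops new references from being taken (via the driver's ->kill() callback
 * or by killing the internal percpu_ref), and dev_pagemap_cleanup() waits
 * for the outstanding references to drain before the memmap is torn down.
 */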
static void dev_pagemap_kill(struct dev_pagemap *pgmap)
{
        if (pgmap->ops && pgmap->ops->kill)
                pgmap->ops->kill(pgmap);
        else
                percpu_ref_kill(pgmap->ref);
}

static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
{
        if (pgmap->ops && pgmap->ops->cleanup) {
                pgmap->ops->cleanup(pgmap);
        } else {
                wait_for_completion(&pgmap->done);
                percpu_ref_exit(pgmap->ref);
        }
        /*
         * Undo the pgmap ref assignment for the internal case as the
         * caller may re-enable the same pgmap.
         */
        if (pgmap->ref == &pgmap->internal_ref)
                pgmap->ref = NULL;
}

static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
        struct range *range = &pgmap->ranges[range_id];
        struct page *first_page;
        int nid;

        /* make sure to access a memmap that was actually initialized */
        first_page = pfn_to_page(pfn_first(pgmap, range_id));

        /* pages are dead and unused, undo the arch mapping */
        nid = page_to_nid(first_page);

        mem_hotplug_begin();
        remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
                                   PHYS_PFN(range_len(range)));
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                __remove_pages(PHYS_PFN(range->start),
                               PHYS_PFN(range_len(range)), NULL);
        } else {
                arch_remove_memory(nid, range->start, range_len(range),
                                   pgmap_altmap(pgmap));
                kasan_remove_zero_shadow(__va(range->start), range_len(range));
        }
        mem_hotplug_done();

        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
        pgmap_array_delete(range);
}
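
/*
 * memunmap_pages() reverses memremap_pages(): stop new pgmap references,
 * drop the reference each device page was initialized with, wait for the
 * pgmap reference count to drain in dev_pagemap_cleanup(), and then undo
 * the mapping of every range that was successfully set up.
 */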
void memunmap_pages(struct dev_pagemap *pgmap)
{
        unsigned long pfn;
        int i;

        dev_pagemap_kill(pgmap);
        for (i = 0; i < pgmap->nr_range; i++)
                for_each_device_pfn(pfn, pgmap, i)
                        put_page(pfn_to_page(pfn));
        dev_pagemap_cleanup(pgmap);

        for (i = 0; i < pgmap->nr_range; i++)
                pageunmap_range(pgmap, i);

        WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
        devmap_managed_enable_put();
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
        memunmap_pages(data);
}
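
/*
 * Release callback for the internally managed percpu_ref: signals
 * dev_pagemap_cleanup() that the last reference has been dropped.
 */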
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
        struct dev_pagemap *pgmap =
                container_of(ref, struct dev_pagemap, internal_ref);

        complete(&pgmap->done);
}
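
/*
 * Map one physical range of a dev_pagemap: register it in pgmap_array, add
 * the memory (with or without a linear mapping depending on the pgmap type),
 * move it into ZONE_DEVICE, and initialize its struct pages.
 */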
static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
                int range_id, int nid)
{
        struct range *range = &pgmap->ranges[range_id];
        struct dev_pagemap *conflict_pgmap;
        int error, is_ram;

        if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
                      "altmap not supported for multiple ranges\n"))
                return -EINVAL;

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                return -ENOMEM;
        }

        conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
        if (conflict_pgmap) {
                WARN(1, "Conflicting mapping in same section\n");
                put_dev_pagemap(conflict_pgmap);
                return -ENOMEM;
        }

        is_ram = region_intersects(range->start, range_len(range),
                                   IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

        if (is_ram != REGION_DISJOINT) {
                WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
                          is_ram == REGION_MIXED ? "mixed" : "ram",
                          range->start, range->end);
                return -ENXIO;
        }

        error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
                                PHYS_PFN(range->end), pgmap, GFP_KERNEL));
        if (error)
                return error;

        if (nid < 0)
                nid = numa_mem_id();

        error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
                                range_len(range));
        if (error)
                goto err_pfn_remap;

        mem_hotplug_begin();

        /*
         * For device private memory we call add_pages() as we only need to
         * allocate and initialize struct page for the device memory. More-
         * over the device memory is un-accessible thus we do not want to
         * create a linear mapping for the memory like arch_add_memory()
         * would do.
         *
         * For all other device memory types, which are accessible by
         * the CPU, we do want the linear mapping and thus use
         * arch_add_memory().
         */
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
                error = add_pages(nid, PHYS_PFN(range->start),
                                  PHYS_PFN(range_len(range)), params);
        } else {
                error = kasan_add_zero_shadow(__va(range->start), range_len(range));
                if (error) {
                        mem_hotplug_done();
                        goto err_kasan;
                }

                error = arch_add_memory(nid, range->start, range_len(range),
                                        params);
        }

        if (!error) {
                struct zone *zone;

                zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
                move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
                                       PHYS_PFN(range_len(range)), params->altmap);
        }

        mem_hotplug_done();
        if (error)
                goto err_add_memory;

        /*
         * Initialization of the pages has been deferred until now in order
         * to allow us to do the work while not holding the hotplug lock.
         */
        memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
                                PHYS_PFN(range->start),
                                PHYS_PFN(range_len(range)), pgmap);
        percpu_ref_get_many(pgmap->ref, pfn_end(pgmap, range_id)
                        - pfn_first(pgmap, range_id));
        return 0;

err_add_memory:
        kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
        untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
err_pfn_remap:
        pgmap_array_delete(range);
        return error;
}

/*
 * Not device managed version of devm_memremap_pages(), undone by
 * memunmap_pages(). Please use devm_memremap_pages() if you have a struct
 * device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
        struct mhp_params params = {
                .altmap = pgmap_altmap(pgmap),
                .pgprot = PAGE_KERNEL,
        };
        const int nr_range = pgmap->nr_range;
        bool need_devmap_managed = true;
        int error, i;

        if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
                return ERR_PTR(-EINVAL);

        switch (pgmap->type) {
        case MEMORY_DEVICE_PRIVATE:
                if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
                        WARN(1, "Device private memory not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
                        WARN(1, "Missing migrate_to_ram method\n");
                        return ERR_PTR(-EINVAL);
                }
                if (!pgmap->owner) {
                        WARN(1, "Missing owner\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_FS_DAX:
                if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
                    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
                        WARN(1, "File system DAX not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                break;
        case MEMORY_DEVICE_GENERIC:
                need_devmap_managed = false;
                break;
        case MEMORY_DEVICE_PCI_P2PDMA:
                params.pgprot = pgprot_noncached(params.pgprot);
                need_devmap_managed = false;
                break;
        default:
                WARN(1, "Invalid pgmap type %d\n", pgmap->type);
                break;
        }

        if (!pgmap->ref) {
                if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
                        return ERR_PTR(-EINVAL);

                init_completion(&pgmap->done);
                error = percpu_ref_init(&pgmap->internal_ref,
                                dev_pagemap_percpu_release, 0, GFP_KERNEL);
                if (error)
                        return ERR_PTR(error);
                pgmap->ref = &pgmap->internal_ref;
        } else {
                if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
                        WARN(1, "Missing reference count teardown definition\n");
                        return ERR_PTR(-EINVAL);
                }
        }

        if (need_devmap_managed) {
                error = devmap_managed_enable_get(pgmap);
                if (error)
                        return ERR_PTR(error);
        }

        /*
         * Clear the pgmap nr_range as it will be incremented for each
         * successfully processed range. This communicates how many
         * regions to unwind in the abort case.
         */
        pgmap->nr_range = 0;
        error = 0;
        for (i = 0; i < nr_range; i++) {
                error = pagemap_range(pgmap, &params, i, nid);
                if (error)
                        break;
                pgmap->nr_range++;
        }

        if (i < nr_range) {
                memunmap_pages(pgmap);
                pgmap->nr_range = nr_range;
                return ERR_PTR(error);
        }

        return __va(pgmap->ranges[0].start);
}
EXPORT_SYMBOL_GPL(memremap_pages);

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @pgmap
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the range and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref must
 *    be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        int error;
        void *ret;

        ret = memremap_pages(pgmap, dev_to_node(dev));
        if (IS_ERR(ret))
                return ret;

        error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
                                         pgmap);
        if (error)
                return ERR_PTR(error);
        return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
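
/*
 * Illustrative sketch, not part of the original file: a minimal caller that
 * maps a MEMORY_DEVICE_GENERIC region and relies on the internally managed
 * reference count (pgmap->ref left NULL, no ops required) might look like:
 *
 *      pgmap->type = MEMORY_DEVICE_GENERIC;
 *      pgmap->nr_range = 1;
 *      pgmap->range.start = res->start;
 *      pgmap->range.end = res->end;
 *      addr = devm_memremap_pages(dev, pgmap);
 *      if (IS_ERR(addr))
 *              return PTR_ERR(addr);
 *
 * where "dev", "pgmap" and the resource "res" are assumed to be provided by
 * the (hypothetical) caller.
 */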

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
        devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
        /* number of pfns from base where pfn_to_page() is valid */
        if (altmap)
                return altmap->reserve + altmap->free;
        return 0;
}
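
/* Undo an earlier altmap allocation of @nr_pfns backing pages. */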
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
        altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a page map for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap)
{
        resource_size_t phys = PFN_PHYS(pfn);

        /*
         * In the cached case we're already holding a live reference.
         */
        if (pgmap) {
                if (phys >= pgmap->range.start && phys <= pgmap->range.end)
                        return pgmap;
                put_dev_pagemap(pgmap);
        }

        /* fall back to slow path lookup */
        rcu_read_lock();
        pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
        if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
                pgmap = NULL;
        rcu_read_unlock();

        return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);

#ifdef CONFIG_DEV_PAGEMAP_OPS
void free_devmap_managed_page(struct page *page)
{
        /* notify page idle for dax */
        if (!is_device_private_page(page)) {
                wake_up_var(&page->_refcount);
                return;
        }

        /* Clear Active bit in case of parallel mark_page_accessed */
        __ClearPageActive(page);
        __ClearPageWaiters(page);

        mem_cgroup_uncharge(page);

        /*
         * When a device_private page is freed, the page->mapping field
         * may still contain a (stale) mapping value. For example, the
         * lower bits of page->mapping may still identify the page as an
         * anonymous page. Ultimately, this entire field is just stale
         * and wrong, and it will cause errors if not cleared. One
         * example is:
         *
         *  migrate_vma_pages()
         *    migrate_vma_insert_page()
         *      page_add_new_anon_rmap()
         *        __page_set_anon_rmap()
         *          ...checks page->mapping, via PageAnon(page) call,
         *            and incorrectly concludes that the page is an
         *            anonymous page. Therefore, it incorrectly,
         *            silently fails to set up the new anon rmap.
         *
         * For other types of ZONE_DEVICE pages, migration is either
         * handled differently or not done at all, so there is no need
         * to clear page->mapping.
         */
        page->mapping = NULL;
        page->pgmap->ops->page_free(page);
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */