// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "bus.h"
static struct dev_dax *ref_to_dev_dax(struct percpu_ref *ref)
{
	return container_of(ref, struct dev_dax, ref);
}
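/*
 * percpu_ref lifecycle for the device-dax page map: _release() fires
 * once the last page reference is dropped, _exit() blocks device
 * teardown until that release has happened, and _kill() initiates the
 * shutdown from the dev_pagemap kill hook set up in dev_dax_probe().
 */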
static void dev_dax_percpu_release(struct percpu_ref *ref)
{
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	complete(&dev_dax->cmp);
}
static void dev_dax_percpu_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	wait_for_completion(&dev_dax->cmp);
	percpu_ref_exit(ref);
}
static void dev_dax_percpu_kill(struct percpu_ref *data)
{
	struct percpu_ref *ref = data;
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	percpu_ref_kill(ref);
}
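/*
 * Validate a VMA at mmap() and at fault time: device-dax only supports
 * shared mappings aligned to the region alignment, and PFN_DEV-only
 * (page-less) ranges must additionally be marked MADV_DONTFORK so that
 * a child process cannot inherit the mapping.
 */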
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res = &dev_dax->region->res;
	phys_addr_t phys;

	phys = pgoff * PAGE_SIZE + res->start;
	if (phys >= res->start && phys <= res->end) {
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}
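/*
 * The __dev_dax_{pte,pmd,pud}_fault() helpers all follow one pattern:
 * validate the VMA, check that the fault size matches the region
 * alignment, translate the page offset to a physical address, and
 * insert a mapping at the corresponding page-table level.
 */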
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
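/*
 * PUD-sized mappings are only available when the architecture supports
 * transparent huge pages at the PUD level; otherwise the stub below
 * falls back so the core retries with a smaller fault size.
 */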
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pud mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
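/*
 * Dispatch a fault to the handler for the requested page-entry size,
 * then, on success, initialize page->mapping and page->index for each
 * page backing the new mapping so the pages can be associated back to
 * this file (used, for example, by memory-failure handling).
 */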
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct file *filp = vmf->vma->vm_file;
	unsigned long fault_size;
	vm_fault_t rc = VM_FAULT_SIGBUS;
	int id;
	pfn_t pfn;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		fault_size = PAGE_SIZE;
		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PMD:
		fault_size = PMD_SIZE;
		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PUD:
		fault_size = PUD_SIZE;
		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}

	if (rc == VM_FAULT_NOPAGE) {
		unsigned long i;
		pgoff_t pgoff;

		/*
		 * In the device-dax case the only possibility for a
		 * VM_FAULT_NOPAGE result is when device-dax capacity is
		 * mapped. No need to consider the zero page, or racing
		 * conflicting mappings.
		 */
		pgoff = linear_page_index(vmf->vma, vmf->address
				& ~(fault_size - 1));
		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
			struct page *page;

			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
			if (page->mapping)
				continue;
			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}
	dax_read_unlock(id);

	return rc;
}
static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}
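/*
 * Refuse to split a VMA on a boundary that is not aligned to the
 * region alignment, since a huge mapping could otherwise straddle the
 * split point.
 */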
static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	if (!IS_ALIGNED(addr, dax_region->align))
		return -EINVAL;
	return 0;
}
static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->align;
}
static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.split = dev_dax_split,
	.pagesize = dev_dax_pagesize,
};
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
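/*
 * To hand back an aligned address, over-allocate by one alignment unit
 * and round the result up to the next aligned offset within the
 * returned range; fall through to the plain allocation whenever that
 * search cannot succeed.
 */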
/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dev_dax || addr)
		goto out;

	dax_region = dev_dax->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
static const struct address_space_operations dev_dax_aops = {
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
};
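/*
 * Redirect the character-device inode's mapping to the dax_device
 * inode so that every open of this device shares a single
 * address_space, and mark the inode S_DAX.
 */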
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	inode->i_mapping->a_ops = &dev_dax_aops;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}
static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}
static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
	.mmap_supported_flags = MAP_SYNC,
};
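/*
 * devm teardown helpers: the cdev and the dev_dax instance are torn
 * down automatically when the device is unbound.
 */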
static void dev_dax_cdev_del(void *cdev)
{
	cdev_del(cdev);
}
static void dev_dax_kill(void *dev_dax)
{
	kill_dev_dax(dev_dax);
}
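/*
 * dev_dax_probe() brings up a device-dax instance: reserve the
 * region's physical address range, arm the percpu_ref-backed page map,
 * and publish the character device. Each step is registered with devm
 * so that unbind unwinds them in reverse order.
 */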
int dev_dax_probe(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct resource *res = &dev_dax->region->res;
	struct inode *inode;
	struct cdev *cdev;
	void *addr;
	int rc;

	/* 1:1 map region resource range to device-dax instance range */
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	init_completion(&dev_dax->cmp);
	rc = percpu_ref_init(&dev_dax->ref, dev_dax_percpu_release, 0,
			GFP_KERNEL);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref);
	if (rc)
		return rc;

	dev_dax->pgmap.ref = &dev_dax->ref;
	dev_dax->pgmap.kill = dev_dax_percpu_kill;
	addr = devm_memremap_pages(dev, &dev_dax->pgmap);
	if (IS_ERR(addr)) {
		devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref);
		percpu_ref_exit(&dev_dax->ref);
		return PTR_ERR(addr);
	}

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	if (dev->class) {
		/* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
		cdev->owner = dev->parent->driver->owner;
	} else
		cdev->owner = dev->driver->owner;
	cdev_set_parent(cdev, &dev->kobj);
	rc = cdev_add(cdev, dev->devt, 1);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
	if (rc)
		return rc;

	run_dax(dax_dev);
	return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
EXPORT_SYMBOL_GPL(dev_dax_probe);
static int dev_dax_remove(struct device *dev)
{
	/* all probe actions are unwound by devm */
	return 0;
}
static struct dax_device_driver device_dax_driver = {
	.drv = {
		.probe = dev_dax_probe,
		.remove = dev_dax_remove,
	},
	.match_always = 1,
};
static int __init dax_init(void)
{
	return dax_driver_register(&device_dax_driver);
}
static void __exit dax_exit(void)
{
	dax_driver_unregister(&device_dax_driver);
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_init);
module_exit(dax_exit);
MODULE_ALIAS_DAX_DEVICE(0);