// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include "dax-private.h"
static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);
int dax_read_lock(void)
{
	return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
	srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
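/*
 * Example: a minimal sketch of the read-side pattern that the SRCU domain
 * above enables.  dax_device operations are expected to run between
 * dax_read_lock() and dax_read_unlock() so that kill_dax() can wait for
 * in-flight users with synchronize_srcu().  The helper name below is
 * hypothetical and purely illustrative.
 */
static void __maybe_unused dax_read_lock_example(struct dax_device *dax_dev)
{
	int id;

	id = dax_read_lock();
	if (dax_alive(dax_dev)) {
		/*
		 * Safe to call services like dax_direct_access() or
		 * dax_get_private() here; the device cannot be torn down
		 * until dax_read_unlock().
		 */
	}
	dax_read_unlock(id);
}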
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
		pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (pgoff)
		*pgoff = PHYS_PFN(phys_off);
	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);
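/*
 * Example: a minimal sketch of combining bdev_dax_pgoff() with
 * dax_direct_access() to resolve a partition-relative 512-byte sector into a
 * kernel mapping.  The caller is expected to hold dax_read_lock(); the helper
 * name is hypothetical.
 */
static long __maybe_unused dax_sector_to_kaddr_example(struct dax_device *dax_dev,
		struct block_device *bdev, sector_t sector, void **kaddr,
		pfn_t *pfn)
{
	pgoff_t pgoff;
	int rc;

	/* fails with -EINVAL if the partition is not page aligned */
	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;
	return dax_direct_access(dax_dev, pgoff, 1, kaddr, pfn);
}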
#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	if (!blk_queue_dax(bdev->bd_disk->queue))
		return NULL;
	return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif
bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	bool dax_enabled = false;
	pgoff_t pgoff, pgoff_end;
	char buf[BDEVNAME_SIZE];
	void *kaddr, *end_kaddr;
	pfn_t pfn, end_pfn;
	sector_t last_page;
	long len, len2;
	int err, id;

	if (blocksize != PAGE_SIZE) {
		pr_info("%s: error: unsupported blocksize for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
	if (err) {
		pr_info("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
	if (err) {
		pr_info("%s: error: unaligned partition for dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);

	if (len < 1 || len2 < 1) {
		pr_info("%s: error: dax access failed (%ld)\n",
				bdevname(bdev, buf), len < 1 ? len : len2);
		dax_read_unlock(id);
		return false;
	}

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
		/*
		 * An arch that has enabled the pmem api should also
		 * have its drivers support pfn_t_devmap()
		 *
		 * This is a developer warning and should not trigger in
		 * production. dax_flush() will crash since it depends
		 * on being able to do (page_address(pfn_to_page())).
		 */
		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
		dax_enabled = true;
	} else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
		struct dev_pagemap *pgmap, *end_pgmap;

		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
		end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
		if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
				&& pfn_t_to_page(pfn)->pgmap == pgmap
				&& pfn_t_to_page(end_pfn)->pgmap == pgmap
				&& pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
				&& pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
			dax_enabled = true;
		put_dev_pagemap(pgmap);
		put_dev_pagemap(end_pgmap);
	}
	dax_read_unlock(id);

	if (!dax_enabled) {
		pr_info("%s: error: dax support not enabled\n",
				bdevname(bdev, buf));
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @bdev: block device to check
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: true if supported, false if unsupported
 */
bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	struct dax_device *dax_dev;
	struct request_queue *q;
	char buf[BDEVNAME_SIZE];
	bool ret;
	int id;

	q = bdev_get_queue(bdev);
	if (!q || !blk_queue_dax(q)) {
		pr_debug("%s: error: request queue doesn't support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev) {
		pr_debug("%s: error: device does not support dax\n",
				bdevname(bdev, buf));
		return false;
	}

	id = dax_read_lock();
	ret = dax_supported(dax_dev, bdev, blocksize, 0,
			i_size_read(bdev->bd_inode) / 512);
	dax_read_unlock(id);

	put_dax(dax_dev);

	return ret;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif /* CONFIG_BLOCK */
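/*
 * Example: a filesystem that honors "-o dax" would typically make this check
 * at mount time through the bdev_dax_supported() wrapper in <linux/dax.h>,
 * which resolves to __bdev_dax_supported() above.  The helper name is
 * hypothetical.
 */
static bool __maybe_unused dax_mount_check_example(struct super_block *sb)
{
	/* false means DAX mappings cannot be safely enabled on this device */
	return bdev_dax_supported(sb->s_bdev, PAGE_SIZE);
}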
enum dax_device_flags {
	/* !alive + rcu grace period == no new operations / mappings */
	DAXDEV_ALIVE,
	/* gate whether dax_flush() calls the low level flush routine */
	DAXDEV_WRITE_CACHE,
	/* flag to check if device supports synchronous flush */
	DAXDEV_SYNC,
};
/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 */
struct dax_device {
	struct hlist_node list;
	struct inode inode;
	struct cdev cdev;
	const char *host;
	void *private;
	unsigned long flags;
	const struct dax_operations *ops;
};
static ssize_t write_cache_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
	ssize_t rc;

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	rc = sprintf(buf, "%d\n", !!dax_write_cache_enabled(dax_dev));
	put_dax(dax_dev);
	return rc;
}
static ssize_t write_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool write_cache;
	int rc = strtobool(buf, &write_cache);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return -ENXIO;

	if (rc)
		len = rc;
	else
		dax_write_cache(dax_dev, write_cache);

	put_dax(dax_dev);
	return len;
}
static DEVICE_ATTR_RW(write_cache);
static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

	WARN_ON_ONCE(!dax_dev);
	if (!dax_dev)
		return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
	if (a == &dev_attr_write_cache.attr)
		return 0;
#endif
	return a->mode;
}
static struct attribute *dax_attributes[] = {
	&dev_attr_write_cache.attr,
	NULL,
};

struct attribute_group dax_attribute_group = {
	.name = "dax",
	.attrs = dax_attributes,
	.is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);
/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn)
{
	long avail;

	if (!dax_dev)
		return -EOPNOTSUPP;

	if (!dax_alive(dax_dev))
		return -ENXIO;

	if (nr_pages < 0)
		return nr_pages;

	avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
			kaddr, pfn);
	if (!avail)
		return -ERANGE;
	return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
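/*
 * Example: a minimal sketch of a caller consuming the "min(avail, nr_pages)"
 * contract above.  dax_direct_access() may map fewer pages than requested, so
 * the remaining range is retried in a loop.  Assumes @size is a multiple of
 * PAGE_SIZE; the helper name is hypothetical.
 */
static long __maybe_unused dax_copy_example(struct dax_device *dax_dev,
		pgoff_t pgoff, void *dst, size_t size)
{
	int id;

	id = dax_read_lock();
	while (size) {
		void *kaddr;
		pfn_t pfn;
		long avail;
		size_t chunk;

		avail = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (avail < 0) {
			dax_read_unlock(id);
			return avail;
		}
		/* may be shorter than requested; copy what is mapped and loop */
		chunk = min_t(size_t, size, avail * PAGE_SIZE);
		memcpy(dst, kaddr, chunk);
		dst += chunk;
		size -= chunk;
		pgoff += PHYS_PFN(chunk);
	}
	dax_read_unlock(id);
	return 0;
}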
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len)
{
	if (!dax_alive(dax_dev))
		return false;

	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
}
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i)
{
	if (!dax_alive(dax_dev))
		return 0;

	return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
			size_t nr_pages)
{
	if (!dax_alive(dax_dev))
		return -ENXIO;
	/*
	 * There are no callers that want to zero more than one page as of now.
	 * Once users are there, this check can be removed after the
	 * device mapper code has been updated to split ranges across targets.
	 */
	if (!dax_dev->ops->zero_page_range || nr_pages != 1)
		return -EIO;

	return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
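/*
 * Example: since dax_zero_page_range() currently refuses more than one page
 * at a time, a caller that needs to clear a larger extent would issue it
 * page by page, as sketched below with a hypothetical helper name.
 */
static int __maybe_unused dax_zero_pages_example(struct dax_device *dax_dev,
		pgoff_t pgoff, size_t nr_pages)
{
	int rc = 0, id;
	size_t i;

	id = dax_read_lock();
	for (i = 0; i < nr_pages; i++) {
		rc = dax_zero_page_range(dax_dev, pgoff + i, 1);
		if (rc)
			break;
	}
	dax_read_unlock(id);
	return rc;
}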
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
	if (unlikely(!dax_write_cache_enabled(dax_dev)))
		return;

	arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);
void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
	if (wc)
		set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
	else
		clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);
bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
bool __dax_synchronous(struct dax_device *dax_dev)
{
	return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__dax_synchronous);
void __set_dax_synchronous(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(__set_dax_synchronous);
bool dax_alive(struct dax_device *dax_dev)
{
	lockdep_assert_held(&dax_srcu);
	return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);
static int dax_host_hash(const char *host)
{
	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}
/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed.  Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;

	clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

	synchronize_srcu(&dax_srcu);

	spin_lock(&dax_host_lock);
	hlist_del_init(&dax_dev->list);
	spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);
void run_dax(struct dax_device *dax_dev)
{
	set_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(run_dax);
static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
	if (!dax_dev)
		return NULL;

	inode = &dax_dev->inode;
	inode->i_rdev = 0;
	return inode;
}
static struct dax_device *to_dax_dev(struct inode *inode)
{
	return container_of(inode, struct dax_device, inode);
}
static void dax_free_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);
	kfree(dax_dev->host);
	dax_dev->host = NULL;
	if (inode->i_rdev)
		ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
	kmem_cache_free(dax_cache, dax_dev);
}
static void dax_destroy_inode(struct inode *inode)
{
	struct dax_device *dax_dev = to_dax_dev(inode);
	WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
			"kill_dax() must be called before final iput()\n");
}
static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.free_inode = dax_free_inode,
	.drop_inode = generic_delete_inode,
};
static int dax_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	ctx->ops = &dax_sops;
	return 0;
}
static struct file_system_type dax_fs_type = {
	.name		= "dax",
	.init_fs_context = dax_init_fs_context,
	.kill_sb	= kill_anon_super,
};
static int dax_test(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	return inode->i_rdev == devt;
}
static int dax_set(struct inode *inode, void *data)
{
	dev_t devt = *(dev_t *) data;

	inode->i_rdev = devt;
	return 0;
}
static struct dax_device *dax_dev_get(dev_t devt)
{
	struct dax_device *dax_dev;
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, &devt);
	if (!inode)
		return NULL;

	dax_dev = to_dax_dev(inode);
	if (inode->i_state & I_NEW) {
		set_bit(DAXDEV_ALIVE, &dax_dev->flags);
		inode->i_cdev = &dax_dev->cdev;
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}

	return dax_dev;
}
static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
	int hash;

	/*
	 * Unconditionally init dax_dev since it's coming from a
	 * non-zeroed slab cache
	 */
	INIT_HLIST_NODE(&dax_dev->list);
	dax_dev->host = host;
	if (!host)
		return;

	hash = dax_host_hash(host);
	spin_lock(&dax_host_lock);
	hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
	spin_unlock(&dax_host_lock);
}
struct dax_device *alloc_dax(void *private, const char *__host,
		const struct dax_operations *ops, unsigned long flags)
{
	struct dax_device *dax_dev;
	const char *host;
	dev_t devt;
	int minor;

	if (ops && !ops->zero_page_range) {
		pr_debug("%s: error: device does not provide dax"
			 " operation zero_page_range()\n",
			 __host ? __host : "Unknown");
		return ERR_PTR(-EINVAL);
	}

	host = kstrdup(__host, GFP_KERNEL);
	if (__host && !host)
		return ERR_PTR(-ENOMEM);

	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
	if (minor < 0)
		goto err_minor;

	devt = MKDEV(MAJOR(dax_devt), minor);
	dax_dev = dax_dev_get(devt);
	if (!dax_dev)
		goto err_dev;

	dax_add_host(dax_dev, host);
	dax_dev->ops = ops;
	dax_dev->private = private;
	if (flags & DAXDEV_F_SYNC)
		set_dax_synchronous(dax_dev);

	return dax_dev;

 err_dev:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	kfree(host);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
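/*
 * Example: a provider driver pairs alloc_dax() at probe time with kill_dax()
 * followed by put_dax() at remove time; kill_dax() must come first so that
 * in-flight dax_read_lock() sections drain before the final iput() (see the
 * warning in dax_destroy_inode()).  The ops table and function names below
 * are hypothetical.
 */
static __maybe_unused struct dax_device *example_dax_probe(void *drvdata,
		const char *disk_name, const struct dax_operations *example_ops)
{
	struct dax_device *dax_dev;

	dax_dev = alloc_dax(drvdata, disk_name, example_ops, DAXDEV_F_SYNC);
	if (IS_ERR(dax_dev))
		return NULL;
	return dax_dev;
}

static void __maybe_unused example_dax_remove(struct dax_device *dax_dev)
{
	kill_dax(dax_dev);
	put_dax(dax_dev);
}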
void put_dax(struct dax_device *dax_dev)
{
	if (!dax_dev)
		return;
	iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);
/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
	struct dax_device *dax_dev, *found = NULL;
	int hash, id;

	if (!host)
		return NULL;

	hash = dax_host_hash(host);

	id = dax_read_lock();
	spin_lock(&dax_host_lock);
	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
		if (!dax_alive(dax_dev)
				|| strcmp(host, dax_dev->host) != 0)
			continue;

		if (igrab(&dax_dev->inode))
			found = dax_dev;
		break;
	}
	spin_unlock(&dax_host_lock);
	dax_read_unlock(id);

	return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);
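/*
 * Example: dax_get_by_host() pins the backing inode via igrab(), so every
 * successful lookup must eventually be balanced with put_dax().  The helper
 * name is hypothetical.
 */
static void __maybe_unused dax_lookup_example(const char *host)
{
	struct dax_device *dax_dev = dax_get_by_host(host);

	if (!dax_dev)
		return;
	/* ... operate on dax_dev under dax_read_lock() ... */
	put_dax(dax_dev);
}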
/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);
struct inode *dax_inode(struct dax_device *dax_dev)
{
	return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);
void *dax_get_private(struct dax_device *dax_dev)
{
	if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
		return NULL;
	return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);
static void init_once(void *_dax_dev)
{
	struct dax_device *dax_dev = _dax_dev;
	struct inode *inode = &dax_dev->inode;

	memset(dax_dev, 0, sizeof(*dax_dev));
	inode_init_once(inode);
}
static int dax_fs_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	dax_mnt = kern_mount(&dax_fs_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	kmem_cache_destroy(dax_cache);

	return rc;
}
static void dax_fs_exit(void)
{
	kern_unmount(dax_mnt);
	kmem_cache_destroy(dax_cache);
}
static int __init dax_core_init(void)
{
	int rc;

	rc = dax_fs_init();
	if (rc)
		return rc;

	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
	if (rc)
		goto err_chrdev;

	rc = dax_bus_init();
	if (rc)
		goto err_bus;
	return 0;

err_bus:
	unregister_chrdev_region(dax_devt, MINORMASK+1);
err_chrdev:
	dax_fs_exit();
	return rc;
}
static void __exit dax_core_exit(void)
{
	unregister_chrdev_region(dax_devt, MINORMASK+1);
	ida_destroy(&dax_minor_ida);
	dax_fs_exit();
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_core_init);
module_exit(dax_core_exit);