// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/io.h>
#include "dax-private.h"
#include "bus.h"

static struct class *dax_class;

static DEFINE_MUTEX(dax_bus_lock);

#define DAX_NAME_LEN 30
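/*
 * An entry on a dax_device_driver's 'ids' list: each names a device
 * instance ("daxX.Y") that userspace has authorized the driver to
 * bind via the 'new_id' driver attribute below.
 */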
struct dax_id {
        struct list_head list;
        char dev_name[DAX_NAME_LEN];
};

static int dax_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        /*
         * We only ever expect to handle device-dax instances, i.e. the
         * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
         */
        return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0);
}

static struct dax_device_driver *to_dax_drv(struct device_driver *drv)
{
        return container_of(drv, struct dax_device_driver, drv);
}

static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
                const char *dev_name)
{
        struct dax_id *dax_id;

        lockdep_assert_held(&dax_bus_lock);

        list_for_each_entry(dax_id, &dax_drv->ids, list)
                if (sysfs_streq(dax_id->dev_name, dev_name))
                        return dax_id;
        return NULL;
}

static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
{
        int match;

        mutex_lock(&dax_bus_lock);
        match = !!__dax_match_id(dax_drv, dev_name(dev));
        mutex_unlock(&dax_bus_lock);

        return match;
}

enum id_action {
        ID_REMOVE,
        ID_ADD,
};

static ssize_t do_id_store(struct device_driver *drv, const char *buf,
                size_t count, enum id_action action)
{
        struct dax_device_driver *dax_drv = to_dax_drv(drv);
        unsigned int region_id, id;
        char devname[DAX_NAME_LEN];
        struct dax_id *dax_id;
        ssize_t rc = count;
        int fields;

        fields = sscanf(buf, "dax%d.%d", &region_id, &id);
        if (fields != 2)
                return -EINVAL;
        sprintf(devname, "dax%d.%d", region_id, id);
        if (!sysfs_streq(buf, devname))
                return -EINVAL;

        mutex_lock(&dax_bus_lock);
        dax_id = __dax_match_id(dax_drv, buf);
        if (!dax_id) {
                if (action == ID_ADD) {
                        dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL);
                        if (dax_id) {
                                strncpy(dax_id->dev_name, buf, DAX_NAME_LEN);
                                list_add(&dax_id->list, &dax_drv->ids);
                        } else
                                rc = -ENOMEM;
                } else
                        /* nothing to remove */;
        } else if (action == ID_REMOVE) {
                list_del(&dax_id->list);
                kfree(dax_id);
        } else
                /* dax_id already added */;
        mutex_unlock(&dax_bus_lock);

        if (rc < 0)
                return rc;
        if (action == ID_ADD)
                rc = driver_attach(drv);
        if (rc)
                return rc;
        return count;
}

static ssize_t new_id_store(struct device_driver *drv, const char *buf,
                size_t count)
{
        return do_id_store(drv, buf, count, ID_ADD);
}
static DRIVER_ATTR_WO(new_id);

static ssize_t remove_id_store(struct device_driver *drv, const char *buf,
                size_t count)
{
        return do_id_store(drv, buf, count, ID_REMOVE);
}
static DRIVER_ATTR_WO(remove_id);

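/*
 * Illustrative usage from userspace, assuming the stock device_dax and
 * kmem drivers are loaded (paths shown as examples):
 *
 *	echo dax0.0 > /sys/bus/dax/drivers/device_dax/remove_id
 *	echo dax0.0 > /sys/bus/dax/drivers/kmem/new_id
 *
 * do_id_store() backs both attributes; ID_ADD additionally calls
 * driver_attach() so a matching unbound device is probed immediately.
 */
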
static struct attribute *dax_drv_attrs[] = {
        &driver_attr_new_id.attr,
        &driver_attr_remove_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(dax_drv);

static int dax_bus_match(struct device *dev, struct device_driver *drv);

static bool is_static(struct dax_region *dax_region)
{
        return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
}

static int dax_bus_probe(struct device *dev)
{
        struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
        struct dev_dax *dev_dax = to_dev_dax(dev);
        struct dax_region *dax_region = dev_dax->region;
        struct range *range = &dev_dax->range;
        int rc;

        if (range_len(range) == 0 || dev_dax->id < 0)
                return -ENXIO;

        rc = dax_drv->probe(dev_dax);

        if (rc || is_static(dax_region))
                return rc;

        /*
         * Track new seed creation only after successful probe of the
         * previous seed.
         */
        if (dax_region->seed == dev)
                dax_region->seed = NULL;

        return 0;
}

static int dax_bus_remove(struct device *dev)
{
        struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
        struct dev_dax *dev_dax = to_dev_dax(dev);

        return dax_drv->remove(dev_dax);
}

static struct bus_type dax_bus_type = {
        .name = "dax",
        .uevent = dax_bus_uevent,
        .match = dax_bus_match,
        .probe = dax_bus_probe,
        .remove = dax_bus_remove,
        .drv_groups = dax_drv_groups,
};

static int dax_bus_match(struct device *dev, struct device_driver *drv)
{
        struct dax_device_driver *dax_drv = to_dax_drv(drv);

        /*
         * All but the 'device-dax' driver, which has 'match_always'
         * set, require an exact id match.
         */
        if (dax_drv->match_always)
                return 1;

        return dax_match_id(dax_drv, dev);
}

/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region = dev_get_drvdata(dev);

        return sprintf(buf, "%llu\n", (unsigned long long)
                        resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
                region_size_show, NULL);

static ssize_t align_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region = dev_get_drvdata(dev);

        return sprintf(buf, "%u\n", dax_region->align);
}
static DEVICE_ATTR_RO(align);

#define for_each_dax_region_resource(dax_region, res) \
        for (res = (dax_region)->res.child; res; res = res->sibling)

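/*
 * Device allocations are tracked as child resources of
 * dax_region->res (see alloc_dev_dax_range()), so available capacity
 * is simply the region size minus the size of each child.
 */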
static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
{
        resource_size_t size = resource_size(&dax_region->res);
        struct resource *res;

        device_lock_assert(dax_region->dev);

        for_each_dax_region_resource(dax_region, res)
                size -= resource_size(res);
        return size;
}

static ssize_t available_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region = dev_get_drvdata(dev);
        unsigned long long size;

        device_lock(dev);
        size = dax_region_avail_size(dax_region);
        device_unlock(dev);

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t seed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region = dev_get_drvdata(dev);
        struct device *seed;
        ssize_t rc;

        if (is_static(dax_region))
                return -EINVAL;

        device_lock(dev);
        seed = dax_region->seed;
        rc = sprintf(buf, "%s\n", seed ? dev_name(seed) : "");
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(seed);

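/*
 * The 'seed' of a dynamic region is a zero-sized, as yet unbound
 * device: it keeps a "daxX.Y" name on the bus so userspace always has
 * an instance available to resize and bind next. A successful probe
 * clears it (see dax_bus_probe()), and create_store() below nominates
 * a successor.
 */
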
static ssize_t create_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region = dev_get_drvdata(dev);
        struct device *youngest;
        ssize_t rc;

        if (is_static(dax_region))
                return -EINVAL;

        device_lock(dev);
        youngest = dax_region->youngest;
        rc = sprintf(buf, "%s\n", youngest ? dev_name(youngest) : "");
        device_unlock(dev);

        return rc;
}

static ssize_t create_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t len)
{
        struct dax_region *dax_region = dev_get_drvdata(dev);
        unsigned long long avail;
        ssize_t rc;
        int val;

        if (is_static(dax_region))
                return -EINVAL;
        rc = kstrtoint(buf, 0, &val);
        if (rc)
                return rc;
        if (val != 1)
                return -EINVAL;

        device_lock(dev);
        avail = dax_region_avail_size(dax_region);
        if (avail == 0)
                rc = -ENOSPC;
        else {
                struct dev_dax_data data = {
                        .dax_region = dax_region,
                        .size = 0,
                        .id = -1,
                };
                struct dev_dax *dev_dax = devm_create_dev_dax(&data);

                if (IS_ERR(dev_dax))
                        rc = PTR_ERR(dev_dax);
                else {
                        /*
                         * In support of crafting multiple new devices
                         * simultaneously, multiple seeds can be created,
                         * but only the first one that has not been
                         * successfully bound is tracked as the region
                         * seed.
                         */
                        if (!dax_region->seed)
                                dax_region->seed = &dev_dax->dev;
                        dax_region->youngest = &dev_dax->dev;
                        rc = len;
                }
        }
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RW(create);

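/*
 * Example from userspace (illustrative path; the dax_region group
 * lives under the region's parent device):
 *
 *	echo 1 > /sys/bus/dax/devices/dax0.0/../dax_region/create
 *
 * On success a new zero-sized seed device (e.g. dax0.1) appears, and
 * its name is reported by subsequent reads of 'create'.
 */
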
void kill_dev_dax(struct dev_dax *dev_dax)
{
        struct dax_device *dax_dev = dev_dax->dax_dev;
        struct inode *inode = dax_inode(dax_dev);

        kill_dax(dax_dev);
        unmap_mapping_range(inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL_GPL(kill_dev_dax);

static void free_dev_dax_range(struct dev_dax *dev_dax)
{
        struct dax_region *dax_region = dev_dax->region;
        struct range *range = &dev_dax->range;

        device_lock_assert(dax_region->dev);
        if (range_len(range))
                __release_region(&dax_region->res, range->start,
                                range_len(range));
}

static void unregister_dev_dax(void *dev)
{
        struct dev_dax *dev_dax = to_dev_dax(dev);

        dev_dbg(dev, "%s\n", __func__);

        kill_dev_dax(dev_dax);
        free_dev_dax_range(dev_dax);
        device_del(dev);
        put_device(dev);
}

/* a return value >= 0 indicates this invocation invalidated the id */
static int __free_dev_dax_id(struct dev_dax *dev_dax)
{
        struct dax_region *dax_region = dev_dax->region;
        struct device *dev = &dev_dax->dev;
        int rc = dev_dax->id;

        device_lock_assert(dev);

        if (is_static(dax_region) || dev_dax->id < 0)
                return -1;
        ida_free(&dax_region->ida, dev_dax->id);
        dev_dax->id = -1;
        return rc;
}

static int free_dev_dax_id(struct dev_dax *dev_dax)
{
        struct device *dev = &dev_dax->dev;
        int rc;

        device_lock(dev);
        rc = __free_dev_dax_id(dev_dax);
        device_unlock(dev);
        return rc;
}

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t len)
{
        struct dax_region *dax_region = dev_get_drvdata(dev);
        struct dev_dax *dev_dax;
        struct device *victim;
        bool do_del = false;
        int rc;

        if (is_static(dax_region))
                return -EINVAL;

        victim = device_find_child_by_name(dax_region->dev, buf);
        if (!victim)
                return -ENXIO;

        device_lock(dev);
        device_lock(victim);
        dev_dax = to_dev_dax(victim);
        if (victim->driver || range_len(&dev_dax->range))
                rc = -EBUSY;
        else {
                /*
                 * Invalidate the device so it does not become active
                 * again, but always preserve device-id-0 so that
                 * /sys/bus/dax/ is guaranteed to be populated while any
                 * dax_region is registered.
                 */
                if (dev_dax->id > 0) {
                        do_del = __free_dev_dax_id(dev_dax) >= 0;
                        rc = len;
                        if (dax_region->seed == victim)
                                dax_region->seed = NULL;
                        if (dax_region->youngest == victim)
                                dax_region->youngest = NULL;
                } else
                        rc = -EBUSY;
        }
        device_unlock(victim);

        /* won the race to invalidate the device, clean it up */
        if (do_del)
                devm_release_action(dev, unregister_dev_dax, victim);
        device_unlock(dev);
        put_device(victim);

        return rc;
}
static DEVICE_ATTR_WO(delete);

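/*
 * Example from userspace (illustrative path, mirroring 'create'
 * above): a device must be unbound and shrunk to size 0 before it can
 * be deleted by name:
 *
 *	echo dax0.1 > /sys/bus/dax/devices/dax0.0/../dax_region/delete
 */
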
static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a,
                int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct dax_region *dax_region = dev_get_drvdata(dev);

        if (is_static(dax_region))
                if (a == &dev_attr_available_size.attr
                                || a == &dev_attr_create.attr
                                || a == &dev_attr_seed.attr
                                || a == &dev_attr_delete.attr)
                        return 0;
        return a->mode;
}

static struct attribute *dax_region_attributes[] = {
        &dev_attr_available_size.attr,
        &dev_attr_region_size.attr,
        &dev_attr_align.attr,
        &dev_attr_create.attr,
        &dev_attr_seed.attr,
        &dev_attr_delete.attr,
        &dev_attr_id.attr,
        NULL,
};

static const struct attribute_group dax_region_attribute_group = {
        .name = "dax_region",
        .attrs = dax_region_attributes,
        .is_visible = dax_region_visible,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
        &dax_region_attribute_group,
        NULL,
};

static void dax_region_free(struct kref *kref)
{
        struct dax_region *dax_region;

        dax_region = container_of(kref, struct dax_region, kref);
        kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
        kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_region_unregister(void *region)
{
        struct dax_region *dax_region = region;

        sysfs_remove_groups(&dax_region->dev->kobj,
                        dax_region_attribute_groups);
        dax_region_put(dax_region);
}

struct dax_region *alloc_dax_region(struct device *parent, int region_id,
                struct resource *res, int target_node, unsigned int align,
                unsigned long flags)
{
        struct dax_region *dax_region;

        /*
         * The DAX core assumes that it can store its private data in
         * parent->driver_data. This WARN is a reminder / safeguard for
         * developers of device-dax drivers.
         */
        if (dev_get_drvdata(parent)) {
                dev_WARN(parent, "dax core failed to setup private data\n");
                return NULL;
        }

        if (!IS_ALIGNED(res->start, align)
                        || !IS_ALIGNED(resource_size(res), align))
                return NULL;

        dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
        if (!dax_region)
                return NULL;

        dev_set_drvdata(parent, dax_region);
        kref_init(&dax_region->kref);
        dax_region->id = region_id;
        dax_region->align = align;
        dax_region->dev = parent;
        dax_region->target_node = target_node;
        ida_init(&dax_region->ida);
        dax_region->res = (struct resource) {
                .start = res->start,
                .end = res->end,
                .flags = IORESOURCE_MEM | flags,
        };

        if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
                kfree(dax_region);
                return NULL;
        }

        kref_get(&dax_region->kref);
        if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
                return NULL;
        return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
                resource_size_t size)
{
        struct dax_region *dax_region = dev_dax->region;
        struct resource *res = &dax_region->res;
        struct device *dev = &dev_dax->dev;
        struct resource *alloc;

        device_lock_assert(dax_region->dev);

        /* handle the seed alloc special case */
        if (!size) {
                dev_dax->range = (struct range) {
                        .start = res->start,
                        .end = res->start - 1,
                };
                return 0;
        }

        alloc = __request_region(res, start, size, dev_name(dev), 0);
        if (!alloc)
                return -ENOMEM;

        dev_dax->range = (struct range) {
                .start = alloc->start,
                .end = alloc->end,
        };

        return 0;
}

static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
{
        struct dax_region *dax_region = dev_dax->region;
        struct range *range = &dev_dax->range;
        int rc = 0;

        device_lock_assert(dax_region->dev);

        if (size)
                rc = adjust_resource(res, range->start, size);
        else
                __release_region(&dax_region->res, range->start, range_len(range));
        if (rc)
                return rc;

        dev_dax->range = (struct range) {
                .start = range->start,
                .end = range->start + size - 1,
        };

        return 0;
}

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dev_dax *dev_dax = to_dev_dax(dev);
        unsigned long long size = range_len(&dev_dax->range);

        return sprintf(buf, "%llu\n", size);
}

static bool alloc_is_aligned(struct dax_region *dax_region,
                resource_size_t size)
{
        /*
         * The minimum mapping granularity for a device instance is a
         * single subsection, unless the arch says otherwise.
         */
        return IS_ALIGNED(size, max_t(unsigned long, dax_region->align,
                                memremap_compat_align()));
}

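/*
 * For example, on x86 memremap_compat_align() resolves to
 * SUBSECTION_SIZE (2MiB), so even with a smaller region 'align' only
 * 2MiB-multiple sizes are accepted; some architectures (e.g. powerpc)
 * override the compat align to a larger value.
 */
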
static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
{
        struct dax_region *dax_region = dev_dax->region;
        struct range *range = &dev_dax->range;
        struct resource *res, *adjust = NULL;
        struct device *dev = &dev_dax->dev;

        for_each_dax_region_resource(dax_region, res)
                if (strcmp(res->name, dev_name(dev)) == 0
                                && res->start == range->start) {
                        adjust = res;
                        break;
                }

        if (dev_WARN_ONCE(dev, !adjust, "failed to find matching resource\n"))
                return -ENXIO;
        return adjust_dev_dax_range(dev_dax, adjust, size);
}

static ssize_t dev_dax_resize(struct dax_region *dax_region,
                struct dev_dax *dev_dax, resource_size_t size)
{
        resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
        resource_size_t dev_size = range_len(&dev_dax->range);
        struct resource *region_res = &dax_region->res;
        struct device *dev = &dev_dax->dev;
        const char *name = dev_name(dev);
        struct resource *res, *first;

        if (dev->driver)
                return -EBUSY;
        if (size == dev_size)
                return 0;
        if (size > dev_size && size - dev_size > avail)
                return -ENOSPC;
        if (size < dev_size)
                return dev_dax_shrink(dev_dax, size);

        to_alloc = size - dev_size;
        if (dev_WARN_ONCE(dev, !alloc_is_aligned(dax_region, to_alloc),
                        "resize of %pa misaligned\n", &to_alloc))
                return -ENXIO;

        /*
         * Expand the device into the unused portion of the region. This
         * may involve adjusting the end of an existing resource, or
         * allocating a new resource.
         */
        first = region_res->child;
        if (!first)
                return alloc_dev_dax_range(dev_dax, dax_region->res.start,
                                to_alloc);
        for (res = first; to_alloc && res; res = res->sibling) {
                struct resource *next = res->sibling;
                resource_size_t free;

                /* space at the beginning of the region */
                free = 0;
                if (res == first && res->start > dax_region->res.start)
                        free = res->start - dax_region->res.start;
                if (free >= to_alloc && dev_size == 0)
                        return alloc_dev_dax_range(dev_dax,
                                        dax_region->res.start, to_alloc);

                free = 0;
                /* space between allocations */
                if (next && next->start > res->end + 1)
                        free = next->start - res->end - 1;

                /* space at the end of the region */
                if (free < to_alloc && !next && res->end < region_res->end)
                        free = region_res->end - res->end;

                if (free >= to_alloc && strcmp(name, res->name) == 0)
                        return adjust_dev_dax_range(dev_dax, res,
                                        resource_size(res) + to_alloc);
                else if (free >= to_alloc && dev_size == 0)
                        return alloc_dev_dax_range(dev_dax, res->end + 1,
                                        to_alloc);
        }
        return -ENOSPC;
}

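/*
 * Note the single-range constraint: an existing device can only grow
 * in place. For example, if dax0.1 owns the first 4GiB of the region
 * and a sibling allocation starts immediately after it, growing
 * dax0.1 fails with -ENOSPC even when free capacity exists further up
 * the region, because its one resource cannot extend past the
 * neighbor.
 */
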
static ssize_t size_store(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t len)
{
        ssize_t rc;
        unsigned long long val;
        struct dev_dax *dev_dax = to_dev_dax(dev);
        struct dax_region *dax_region = dev_dax->region;

        rc = kstrtoull(buf, 0, &val);
        if (rc)
                return rc;

        if (!alloc_is_aligned(dax_region, val)) {
                dev_dbg(dev, "%s: size: %llu misaligned\n", __func__, val);
                return -EINVAL;
        }

        device_lock(dax_region->dev);
        if (!dax_region->dev->driver) {
                device_unlock(dax_region->dev);
                return -ENXIO;
        }
        device_lock(dev);
        rc = dev_dax_resize(dax_region, dev_dax, val);
        device_unlock(dev);
        device_unlock(dax_region->dev);

        return rc == 0 ? len : rc;
}
static DEVICE_ATTR_RW(size);

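/*
 * Example from userspace, for an unbound device in a dynamic region:
 *
 *	echo $((4 << 30)) > /sys/bus/dax/devices/dax0.1/size
 *
 * The value must satisfy alloc_is_aligned(); misaligned sizes are
 * rejected with -EINVAL before any resize is attempted.
 */
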
static int dev_dax_target_node(struct dev_dax *dev_dax)
{
        struct dax_region *dax_region = dev_dax->region;

        return dax_region->target_node;
}

static ssize_t target_node_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dev_dax *dev_dax = to_dev_dax(dev);

        return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
}
static DEVICE_ATTR_RO(target_node);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dev_dax *dev_dax = to_dev_dax(dev);

        return sprintf(buf, "%#llx\n", dev_dax->range.start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        /*
         * We only ever expect to handle device-dax instances, i.e. the
         * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
         */
        return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t numa_node_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct dev_dax *dev_dax = to_dev_dax(dev);
        struct dax_region *dax_region = dev_dax->region;

        if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
                return 0;
        if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
                return 0;
        if (a == &dev_attr_size.attr && is_static(dax_region))
                return 0444;
        return a->mode;
}

static struct attribute *dev_dax_attributes[] = {
        &dev_attr_modalias.attr,
        &dev_attr_size.attr,
        &dev_attr_target_node.attr,
        &dev_attr_resource.attr,
        &dev_attr_numa_node.attr,
        NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
        .attrs = dev_dax_attributes,
        .is_visible = dev_dax_visible,
};

static const struct attribute_group *dax_attribute_groups[] = {
        &dev_dax_attribute_group,
        NULL,
};

static void dev_dax_release(struct device *dev)
{
        struct dev_dax *dev_dax = to_dev_dax(dev);
        struct dax_region *dax_region = dev_dax->region;
        struct dax_device *dax_dev = dev_dax->dax_dev;

        put_dax(dax_dev);
        free_dev_dax_id(dev_dax);
        dax_region_put(dax_region);
        kfree(dev_dax->pgmap);
        kfree(dev_dax);
}

static const struct device_type dev_dax_type = {
        .release = dev_dax_release,
        .groups = dax_attribute_groups,
};

struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
{
        struct dax_region *dax_region = data->dax_region;
        struct device *parent = dax_region->dev;
        struct dax_device *dax_dev;
        struct dev_dax *dev_dax;
        struct inode *inode;
        struct device *dev;
        int rc;

        dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
        if (!dev_dax)
                return ERR_PTR(-ENOMEM);

        if (is_static(dax_region)) {
                if (dev_WARN_ONCE(parent, data->id < 0,
                                "dynamic id specified to static region\n")) {
                        rc = -EINVAL;
                        goto err_id;
                }

                dev_dax->id = data->id;
        } else {
                if (dev_WARN_ONCE(parent, data->id >= 0,
                                "static id specified to dynamic region\n")) {
                        rc = -EINVAL;
                        goto err_id;
                }

                rc = ida_alloc(&dax_region->ida, GFP_KERNEL);
                if (rc < 0)
                        goto err_id;
                dev_dax->id = rc;
        }

        dev_dax->region = dax_region;
        dev = &dev_dax->dev;
        device_initialize(dev);
        dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);

        rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
        if (rc)
                goto err_range;

        if (data->pgmap) {
                dev_WARN_ONCE(parent, !is_static(dax_region),
                        "custom dev_pagemap requires a static dax_region\n");

                dev_dax->pgmap = kmemdup(data->pgmap,
                                sizeof(struct dev_pagemap), GFP_KERNEL);
                if (!dev_dax->pgmap) {
                        rc = -ENOMEM;
                        goto err_pgmap;
                }
        }

        /*
         * No 'host' or dax_operations since there is no access to this
         * device outside of mmap of the resulting character device.
         */
        dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC);
        if (IS_ERR(dax_dev)) {
                rc = PTR_ERR(dax_dev);
                goto err_alloc_dax;
        }

        /* a device_dax instance is dead while the driver is not attached */
        kill_dax(dax_dev);

        /* from here on we're committed to teardown via dev_dax_release() */
        dev_dax->dax_dev = dax_dev;
        dev_dax->target_node = dax_region->target_node;
        kref_get(&dax_region->kref);

        inode = dax_inode(dax_dev);
        dev->devt = inode->i_rdev;
        if (data->subsys == DEV_DAX_BUS)
                dev->bus = &dax_bus_type;
        else
                dev->class = dax_class;
        dev->parent = parent;
        dev->type = &dev_dax_type;

        rc = device_add(dev);
        if (rc) {
                kill_dev_dax(dev_dax);
                put_device(dev);
                return ERR_PTR(rc);
        }

        rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
        if (rc)
                return ERR_PTR(rc);

        return dev_dax;

err_alloc_dax:
        kfree(dev_dax->pgmap);
err_pgmap:
        free_dev_dax_range(dev_dax);
err_range:
        free_dev_dax_id(dev_dax);
err_id:
        kfree(dev_dax);

        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);

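/*
 * Sketch of a provider driver wiring up a dynamic region and its
 * initial seed device (modeled loosely on dax_hmem; 'res',
 * 'target_node' and the surrounding error handling are illustrative
 * assumptions, not a fixed recipe):
 *
 *	dax_region = alloc_dax_region(dev, pdev->id, res, target_node,
 *			PMD_SIZE, 0);
 *	if (!dax_region)
 *		return -ENOMEM;
 *
 *	data = (struct dev_dax_data) {
 *		.dax_region = dax_region,
 *		.id = -1,
 *		.size = resource_size(res),
 *		.subsys = DEV_DAX_BUS,
 *	};
 *	dev_dax = devm_create_dev_dax(&data);
 *	if (IS_ERR(dev_dax))
 *		return PTR_ERR(dev_dax);
 *
 * Static regions instead pass IORESOURCE_DAX_STATIC in 'flags' and a
 * fixed, non-negative 'id'.
 */
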
static int match_always_count;

int __dax_driver_register(struct dax_device_driver *dax_drv,
                struct module *module, const char *mod_name)
{
        struct device_driver *drv = &dax_drv->drv;
        int rc = 0;

        INIT_LIST_HEAD(&dax_drv->ids);
        drv->owner = module;
        drv->name = mod_name;
        drv->mod_name = mod_name;
        drv->bus = &dax_bus_type;

        /* there can only be one default driver */
        mutex_lock(&dax_bus_lock);
        match_always_count += dax_drv->match_always;
        if (match_always_count > 1) {
                match_always_count--;
                WARN_ON(1);
                rc = -EINVAL;
        }
        mutex_unlock(&dax_bus_lock);
        if (rc)
                return rc;
        return driver_register(drv);
}
EXPORT_SYMBOL_GPL(__dax_driver_register);

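/*
 * Consumers register through the dax_driver_register() wrapper in
 * bus.h, which supplies THIS_MODULE and KBUILD_MODNAME. A sketch with
 * hypothetical names ('match_always' is left unset; only the default
 * device_dax driver may set it):
 *
 *	static struct dax_device_driver example_dax_driver = {
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return dax_driver_register(&example_dax_driver);
 *	}
 */
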
void dax_driver_unregister(struct dax_device_driver *dax_drv)
{
        struct device_driver *drv = &dax_drv->drv;
        struct dax_id *dax_id, *_id;

        mutex_lock(&dax_bus_lock);
        match_always_count -= dax_drv->match_always;
        list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) {
                list_del(&dax_id->list);
                kfree(dax_id);
        }
        mutex_unlock(&dax_bus_lock);
        driver_unregister(drv);
}
EXPORT_SYMBOL_GPL(dax_driver_unregister);

int __init dax_bus_init(void)
{
        int rc;

        if (IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)) {
                dax_class = class_create(THIS_MODULE, "dax");
                if (IS_ERR(dax_class))
                        return PTR_ERR(dax_class);
        }

        rc = bus_register(&dax_bus_type);
        if (rc)
                class_destroy(dax_class);
        return rc;
}

void __exit dax_bus_exit(void)
{
        bus_unregister(&dax_bus_type);
        class_destroy(dax_class);
}