return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
}
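+/*
+ * Sum the lengths of all allocated ranges; the dev_dax device lock must be
+ * held by the caller.
+ */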
+static u64 dev_dax_size(struct dev_dax *dev_dax)
+{
+ u64 size = 0;
+ int i;
+
+ device_lock_assert(&dev_dax->dev);
+
+ for (i = 0; i < dev_dax->nr_range; i++)
+ size += range_len(&dev_dax->ranges[i].range);
+
+ return size;
+}
+
static int dax_bus_probe(struct device *dev)
{
struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
struct dev_dax *dev_dax = to_dev_dax(dev);
struct dax_region *dax_region = dev_dax->region;
- struct range *range = &dev_dax->range;
int rc;
- if (range_len(range) == 0 || dev_dax->id < 0)
+ if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0)
return -ENXIO;
rc = dax_drv->probe(dev_dax);
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
region_size_show, NULL);
-static ssize_t align_show(struct device *dev,
+static ssize_t region_align_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dax_region *dax_region = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", dax_region->align);
}
-static DEVICE_ATTR_RO(align);
+static struct device_attribute dev_attr_region_align =
+ __ATTR(align, 0400, region_align_show, NULL);
#define for_each_dax_region_resource(dax_region, res) \
for (res = (dax_region)->res.child; res; res = res->sibling)
}
EXPORT_SYMBOL_GPL(kill_dev_dax);
-static void free_dev_dax_range(struct dev_dax *dev_dax)
+static void free_dev_dax_ranges(struct dev_dax *dev_dax)
{
struct dax_region *dax_region = dev_dax->region;
- struct range *range = &dev_dax->range;
+ int i;
device_lock_assert(dax_region->dev);
- if (range_len(range))
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ struct range *range = &dev_dax->ranges[i].range;
+
__release_region(&dax_region->res, range->start,
range_len(range));
+ }
+ dev_dax->nr_range = 0;
}
static void unregister_dev_dax(void *dev)
dev_dbg(dev, "%s\n", __func__);
kill_dev_dax(dev_dax);
- free_dev_dax_range(dev_dax);
+ free_dev_dax_ranges(dev_dax);
device_del(dev);
put_device(dev);
}
device_lock(dev);
device_lock(victim);
dev_dax = to_dev_dax(victim);
- if (victim->driver || range_len(&dev_dax->range))
+ if (victim->driver || dev_dax_size(dev_dax))
rc = -EBUSY;
else {
/*
static struct attribute *dax_region_attributes[] = {
&dev_attr_available_size.attr,
&dev_attr_region_size.attr,
- &dev_attr_align.attr,
+ &dev_attr_region_align.attr,
&dev_attr_create.attr,
&dev_attr_seed.attr,
&dev_attr_delete.attr,
}
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
- struct resource *res, int target_node, unsigned int align,
+ struct range *range, int target_node, unsigned int align,
unsigned long flags)
{
struct dax_region *dax_region;
return NULL;
}
- if (!IS_ALIGNED(res->start, align)
- || !IS_ALIGNED(resource_size(res), align))
+ if (!IS_ALIGNED(range->start, align)
+ || !IS_ALIGNED(range_len(range), align))
return NULL;
dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
dax_region->target_node = target_node;
ida_init(&dax_region->ida);
dax_region->res = (struct resource) {
- .start = res->start,
- .end = res->end,
+ .start = range->start,
+ .end = range->end,
.flags = IORESOURCE_MEM | flags,
};
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
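+/* Device release callback for a mappingX child: recycle its id and free it. */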
+static void dax_mapping_release(struct device *dev)
+{
+ struct dax_mapping *mapping = to_dax_mapping(dev);
+ struct dev_dax *dev_dax = to_dev_dax(dev->parent);
+
+ ida_free(&dev_dax->ida, mapping->id);
+ kfree(mapping);
+}
+
+static void unregister_dax_mapping(void *data)
+{
+ struct device *dev = data;
+ struct dax_mapping *mapping = to_dax_mapping(dev);
+ struct dev_dax *dev_dax = to_dev_dax(dev->parent);
+ struct dax_region *dax_region = dev_dax->region;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ device_lock_assert(dax_region->dev);
+
+ dev_dax->ranges[mapping->range_id].mapping = NULL;
+ mapping->range_id = -1;
+
+ device_del(dev);
+ put_device(dev);
+}
+
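+/*
+ * Take the dax_region device lock and return the range backing this mapping
+ * device, or NULL if the mapping has already been unregistered. Paired with
+ * put_dax_range(), which drops the lock.
+ */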
+static struct dev_dax_range *get_dax_range(struct device *dev)
+{
+ struct dax_mapping *mapping = to_dax_mapping(dev);
+ struct dev_dax *dev_dax = to_dev_dax(dev->parent);
+ struct dax_region *dax_region = dev_dax->region;
+
+ device_lock(dax_region->dev);
+ if (mapping->range_id < 0) {
+ device_unlock(dax_region->dev);
+ return NULL;
+ }
+
+ return &dev_dax->ranges[mapping->range_id];
+}
+
+static void put_dax_range(struct dev_dax_range *dax_range)
+{
+ struct dax_mapping *mapping = dax_range->mapping;
+ struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent);
+ struct dax_region *dax_region = dev_dax->region;
+
+ device_unlock(dax_region->dev);
+}
+
+static ssize_t start_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax_range *dax_range;
+ ssize_t rc;
+
+ dax_range = get_dax_range(dev);
+ if (!dax_range)
+ return -ENXIO;
+ rc = sprintf(buf, "%#llx\n", dax_range->range.start);
+ put_dax_range(dax_range);
+
+ return rc;
+}
+static DEVICE_ATTR(start, 0400, start_show, NULL);
+
+static ssize_t end_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax_range *dax_range;
+ ssize_t rc;
+
+ dax_range = get_dax_range(dev);
+ if (!dax_range)
+ return -ENXIO;
+ rc = sprintf(buf, "%#llx\n", dax_range->range.end);
+ put_dax_range(dax_range);
+
+ return rc;
+}
+static DEVICE_ATTR(end, 0400, end_show, NULL);
+
+static ssize_t pgoff_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax_range *dax_range;
+ ssize_t rc;
+
+ dax_range = get_dax_range(dev);
+ if (!dax_range)
+ return -ENXIO;
+ rc = sprintf(buf, "%#lx\n", dax_range->pgoff);
+ put_dax_range(dax_range);
+
+ return rc;
+}
+static DEVICE_ATTR(page_offset, 0400, pgoff_show, NULL);
+
+static struct attribute *dax_mapping_attributes[] = {
+ &dev_attr_start.attr,
+ &dev_attr_end.attr,
+ &dev_attr_page_offset.attr,
+ NULL,
+};
+
+static const struct attribute_group dax_mapping_attribute_group = {
+ .attrs = dax_mapping_attributes,
+};
+
+static const struct attribute_group *dax_mapping_attribute_groups[] = {
+ &dax_mapping_attribute_group,
+ NULL,
+};
+
+static struct device_type dax_mapping_type = {
+ .release = dax_mapping_release,
+ .groups = dax_mapping_attribute_groups,
+};
+
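+/*
+ * Register a "mappingX" child device that publishes the start, end, and
+ * page_offset of the range at @range_id via sysfs.
+ */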
+static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
+{
+ struct dax_region *dax_region = dev_dax->region;
+ struct dax_mapping *mapping;
+ struct device *dev;
+ int rc;
+
+ device_lock_assert(dax_region->dev);
+
+ if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
+ "region disabled\n"))
+ return -ENXIO;
+
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return -ENOMEM;
+ mapping->range_id = range_id;
+ mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
+ if (mapping->id < 0) {
+ kfree(mapping);
+ return -ENOMEM;
+ }
+ dev_dax->ranges[range_id].mapping = mapping;
+ dev = &mapping->dev;
+ device_initialize(dev);
+ dev->parent = &dev_dax->dev;
+ dev->type = &dax_mapping_type;
+ dev_set_name(dev, "mapping%d", mapping->id);
+ rc = device_add(dev);
+ if (rc) {
+ put_device(dev);
+ return rc;
+ }
+
+ rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping,
+ dev);
+ if (rc)
+ return rc;
+ return 0;
+}
+
static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
resource_size_t size)
{
struct dax_region *dax_region = dev_dax->region;
struct resource *res = &dax_region->res;
struct device *dev = &dev_dax->dev;
+ struct dev_dax_range *ranges;
+ unsigned long pgoff = 0;
struct resource *alloc;
+ int i, rc;
device_lock_assert(dax_region->dev);
/* handle the seed alloc special case */
if (!size) {
- dev_dax->range = (struct range) {
- .start = res->start,
- .end = res->start - 1,
- };
+ if (dev_WARN_ONCE(dev, dev_dax->nr_range,
+ "0-size allocation must be first\n"))
+ return -EBUSY;
+ /* nr_range == 0 is elsewhere special cased as a 0-size device */
return 0;
}
+ ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
+ * (dev_dax->nr_range + 1), GFP_KERNEL);
+ if (!ranges)
+ return -ENOMEM;
+
alloc = __request_region(res, start, size, dev_name(dev), 0);
- if (!alloc)
+ if (!alloc) {
+ /*
+ * If this was an empty set of ranges nothing else
+ * will release @ranges, so do it now.
+ */
+ if (!dev_dax->nr_range) {
+ kfree(ranges);
+ ranges = NULL;
+ }
+ dev_dax->ranges = ranges;
return -ENOMEM;
+ }
- dev_dax->range = (struct range) {
- .start = alloc->start,
- .end = alloc->end,
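+ /* the new range's page offset continues where the existing ranges end */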
+ for (i = 0; i < dev_dax->nr_range; i++)
+ pgoff += PHYS_PFN(range_len(&ranges[i].range));
+ dev_dax->ranges = ranges;
+ ranges[dev_dax->nr_range++] = (struct dev_dax_range) {
+ .pgoff = pgoff,
+ .range = {
+ .start = alloc->start,
+ .end = alloc->end,
+ },
};
+ dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
+ &alloc->start, &alloc->end);
+ /*
+ * A dev_dax instance must be registered before mapping device
+ * children can be added. Defer to devm_create_dev_dax() to add
+ * the initial mapping device.
+ */
+ if (!device_is_registered(&dev_dax->dev))
+ return 0;
+
+ rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
+ if (rc) {
+ dev_dbg(dev, "delete range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
+ &alloc->start, &alloc->end);
+ dev_dax->nr_range--;
+ __release_region(res, alloc->start, resource_size(alloc));
+ return rc;
+ }
+
return 0;
}
static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
{
+ int last_range = dev_dax->nr_range - 1;
+ struct dev_dax_range *dax_range = &dev_dax->ranges[last_range];
struct dax_region *dax_region = dev_dax->region;
- struct range *range = &dev_dax->range;
- int rc = 0;
+ bool is_shrink = resource_size(res) > size;
+ struct range *range = &dax_range->range;
+ struct device *dev = &dev_dax->dev;
+ int rc;
device_lock_assert(dax_region->dev);
- if (size)
- rc = adjust_resource(res, range->start, size);
- else
- __release_region(&dax_region->res, range->start, range_len(range));
+ if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
+ return -EINVAL;
+
+ rc = adjust_resource(res, range->start, size);
if (rc)
return rc;
- dev_dax->range = (struct range) {
+ *range = (struct range) {
.start = range->start,
.end = range->start + size - 1,
};
+ dev_dbg(dev, "%s range[%d]: %#llx:%#llx\n", is_shrink ? "shrink" : "extend",
+ last_range, (unsigned long long) range->start,
+ (unsigned long long) range->end);
+
return 0;
}
struct device_attribute *attr, char *buf)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
- unsigned long long size = range_len(&dev_dax->range);
+ unsigned long long size;
+
+ device_lock(dev);
+ size = dev_dax_size(dev_dax);
+ device_unlock(dev);
return sprintf(buf, "%llu\n", size);
}
-static bool alloc_is_aligned(struct dax_region *dax_region,
- resource_size_t size)
+static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
{
/*
* The minimum mapping granularity for a device instance is a
* single subsection, unless the arch says otherwise.
*/
- return IS_ALIGNED(size, max_t(unsigned long, dax_region->align,
- memremap_compat_align()));
+ return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align()));
}
static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
{
+ resource_size_t to_shrink = dev_dax_size(dev_dax) - size;
struct dax_region *dax_region = dev_dax->region;
- struct range *range = &dev_dax->range;
- struct resource *res, *adjust = NULL;
struct device *dev = &dev_dax->dev;
-
- for_each_dax_region_resource(dax_region, res)
- if (strcmp(res->name, dev_name(dev)) == 0
- && res->start == range->start) {
- adjust = res;
- break;
+ int i;
+
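+ /*
+ * Shrink from the highest-pgoff range downward: release whole trailing
+ * ranges first, then trim the last remaining range in place.
+ */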
+ for (i = dev_dax->nr_range - 1; i >= 0; i--) {
+ struct range *range = &dev_dax->ranges[i].range;
+ struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
+ struct resource *adjust = NULL, *res;
+ resource_size_t shrink;
+
+ shrink = min_t(u64, to_shrink, range_len(range));
+ if (shrink >= range_len(range)) {
+ devm_release_action(dax_region->dev,
+ unregister_dax_mapping, &mapping->dev);
+ __release_region(&dax_region->res, range->start,
+ range_len(range));
+ dev_dax->nr_range--;
+ dev_dbg(dev, "delete range[%d]: %#llx:%#llx\n", i,
+ (unsigned long long) range->start,
+ (unsigned long long) range->end);
+ to_shrink -= shrink;
+ if (!to_shrink)
+ break;
+ continue;
}
- if (dev_WARN_ONCE(dev, !adjust, "failed to find matching resource\n"))
- return -ENXIO;
- return adjust_dev_dax_range(dev_dax, adjust, size);
+ for_each_dax_region_resource(dax_region, res)
+ if (strcmp(res->name, dev_name(dev)) == 0
+ && res->start == range->start) {
+ adjust = res;
+ break;
+ }
+
+ if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1,
+ "failed to find matching resource\n"))
+ return -ENXIO;
+ return adjust_dev_dax_range(dev_dax, adjust, range_len(range)
+ - shrink);
+ }
+ return 0;
+}
+
+/*
+ * Only allow adjustments that preserve the relative pgoff of existing
+ * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
+ */
+static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res)
+{
+ struct dev_dax_range *last;
+ int i;
+
+ if (dev_dax->nr_range == 0)
+ return false;
+ if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0)
+ return false;
+ last = &dev_dax->ranges[dev_dax->nr_range - 1];
+ if (last->range.start != res->start || last->range.end != res->end)
+ return false;
+ for (i = 0; i < dev_dax->nr_range - 1; i++) {
+ struct dev_dax_range *dax_range = &dev_dax->ranges[i];
+
+ if (dax_range->pgoff > last->pgoff)
+ return false;
+ }
+
+ return true;
}
static ssize_t dev_dax_resize(struct dax_region *dax_region,
struct dev_dax *dev_dax, resource_size_t size)
{
resource_size_t avail = dax_region_avail_size(dax_region), to_alloc;
- resource_size_t dev_size = range_len(&dev_dax->range);
+ resource_size_t dev_size = dev_dax_size(dev_dax);
struct resource *region_res = &dax_region->res;
struct device *dev = &dev_dax->dev;
- const char *name = dev_name(dev);
struct resource *res, *first;
+ resource_size_t alloc = 0;
+ int rc;
if (dev->driver)
return -EBUSY;
return dev_dax_shrink(dev_dax, size);
to_alloc = size - dev_size;
- if (dev_WARN_ONCE(dev, !alloc_is_aligned(dax_region, to_alloc),
+ if (dev_WARN_ONCE(dev, !alloc_is_aligned(dev_dax, to_alloc),
"resize of %pa misaligned\n", &to_alloc))
return -ENXIO;
* may involve adjusting the end of an existing resource, or
* allocating a new resource.
*/
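+ /* allocate first-fit from the lowest free gap, retrying until satisfied */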
+retry:
first = region_res->child;
if (!first)
return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);
- for (res = first; to_alloc && res; res = res->sibling) {
+
+ rc = -ENOSPC;
+ for (res = first; res; res = res->sibling) {
struct resource *next = res->sibling;
- resource_size_t free;
/* space at the beginning of the region */
- free = 0;
- if (res == first && res->start > dax_region->res.start)
- free = res->start - dax_region->res.start;
- if (free >= to_alloc && dev_size == 0)
- return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);
+ if (res == first && res->start > dax_region->res.start) {
+ alloc = min(res->start - dax_region->res.start, to_alloc);
+ rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc);
+ break;
+ }
- free = 0;
+ alloc = 0;
/* space between allocations */
if (next && next->start > res->end + 1)
- free = next->start - res->end + 1;
+ alloc = min(next->start - (res->end + 1), to_alloc);
/* space at the end of the region */
- if (free < to_alloc && !next && res->end < region_res->end)
- free = region_res->end - res->end;
+ if (!alloc && !next && res->end < region_res->end)
+ alloc = min(region_res->end - res->end, to_alloc);
+
+ if (!alloc)
+ continue;
- if (free >= to_alloc && strcmp(name, res->name) == 0)
- return adjust_dev_dax_range(dev_dax, res, resource_size(res) + to_alloc);
- else if (free >= to_alloc && dev_size == 0)
- return alloc_dev_dax_range(dev_dax, res->end + 1, to_alloc);
+ if (adjust_ok(dev_dax, res)) {
+ rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc);
+ break;
+ }
+ rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc);
+ break;
}
- return -ENOSPC;
+ if (rc)
+ return rc;
+ to_alloc -= alloc;
+ if (to_alloc)
+ goto retry;
+ return 0;
}
static ssize_t size_store(struct device *dev, struct device_attribute *attr,
if (rc)
return rc;
- if (!alloc_is_aligned(dax_region, val)) {
+ if (!alloc_is_aligned(dev_dax, val)) {
dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val);
return -EINVAL;
}
}
static DEVICE_ATTR_RW(size);
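+/* Parse a "<start>-<end>" pair of hexadecimal addresses into @range. */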
+static ssize_t range_parse(const char *opt, size_t len, struct range *range)
+{
+ unsigned long long addr = 0;
+ char *start, *end, *str;
+ ssize_t rc = -EINVAL;
+
+ str = kstrdup(opt, GFP_KERNEL);
+ if (!str)
+ return rc;
+
+ end = str;
+ start = strsep(&end, "-");
+ if (!start || !end)
+ goto err;
+
+ rc = kstrtoull(start, 16, &addr);
+ if (rc)
+ goto err;
+ range->start = addr;
+
+ rc = kstrtoull(end, 16, &addr);
+ if (rc)
+ goto err;
+ range->end = addr;
+
+err:
+ kfree(str);
+ return rc;
+}
+
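+/*
+ * Allocate a specific, caller-requested address range from the region to
+ * this device (dynamic regions only).
+ */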
+static ssize_t mapping_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+ size_t to_alloc;
+ struct range r;
+ ssize_t rc;
+
+ rc = range_parse(buf, len, &r);
+ if (rc)
+ return rc;
+
+ rc = -ENXIO;
+ device_lock(dax_region->dev);
+ if (!dax_region->dev->driver) {
+ device_unlock(dax_region->dev);
+ return rc;
+ }
+ device_lock(dev);
+
+ to_alloc = range_len(&r);
+ if (alloc_is_aligned(dev_dax, to_alloc))
+ rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
+ device_unlock(dev);
+ device_unlock(dax_region->dev);
+
+ return rc == 0 ? len : rc;
+}
+static DEVICE_ATTR_WO(mapping);
+
+static ssize_t align_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+
+ return sprintf(buf, "%d\n", dev_dax->align);
+}
+
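+/*
+ * Check that the device size and every allocated range are aligned to the
+ * candidate dev_dax->align value.
+ */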
+static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
+{
+ resource_size_t dev_size = dev_dax_size(dev_dax);
+ struct device *dev = &dev_dax->dev;
+ int i;
+
+ if (dev_size > 0 && !alloc_is_aligned(dev_dax, dev_size)) {
+ dev_dbg(dev, "%s: align %u invalid for size %pa\n",
+ __func__, dev_dax->align, &dev_size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ size_t len = range_len(&dev_dax->ranges[i].range);
+
+ if (!alloc_is_aligned(dev_dax, len)) {
+ dev_dbg(dev, "%s: align %u invalid for range %d\n",
+ __func__, dev_dax->align, i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t align_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+ unsigned long val, align_save;
+ ssize_t rc;
+
+ rc = kstrtoul(buf, 0, &val);
+ if (rc)
+ return -ENXIO;
+
+ if (!dax_align_valid(val))
+ return -EINVAL;
+
+ device_lock(dax_region->dev);
+ if (!dax_region->dev->driver) {
+ device_unlock(dax_region->dev);
+ return -ENXIO;
+ }
+
+ device_lock(dev);
+ if (dev->driver) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
+ align_save = dev_dax->align;
+ dev_dax->align = val;
+ rc = dev_dax_validate_align(dev_dax);
+ if (rc)
+ dev_dax->align = align_save;
+out_unlock:
+ device_unlock(dev);
+ device_unlock(dax_region->dev);
+ return rc == 0 ? len : rc;
+}
+static DEVICE_ATTR_RW(align);
+
static int dev_dax_target_node(struct dev_dax *dev_dax)
{
struct dax_region *dax_region = dev_dax->region;
struct device_attribute *attr, char *buf)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+ unsigned long long start;
+
+ if (dev_dax->nr_range < 1)
+ start = dax_region->res.start;
+ else
+ start = dev_dax->ranges[0].range.start;
- return sprintf(buf, "%#llx\n", dev_dax->range.start);
+ return sprintf(buf, "%#llx\n", start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);
return 0;
if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
return 0;
- if (a == &dev_attr_size.attr && is_static(dax_region))
+ if (a == &dev_attr_mapping.attr && is_static(dax_region))
+ return 0;
+ if ((a == &dev_attr_align.attr ||
+ a == &dev_attr_size.attr) && is_static(dax_region))
return 0444;
return a->mode;
}
static struct attribute *dev_dax_attributes[] = {
&dev_attr_modalias.attr,
&dev_attr_size.attr,
+ &dev_attr_mapping.attr,
&dev_attr_target_node.attr,
+ &dev_attr_align.attr,
&dev_attr_resource.attr,
&dev_attr_numa_node.attr,
NULL,
put_dax(dax_dev);
free_dev_dax_id(dev_dax);
dax_region_put(dax_region);
+ kfree(dev_dax->ranges);
kfree(dev_dax->pgmap);
kfree(dev_dax);
}
/* a device_dax instance is dead while the driver is not attached */
kill_dax(dax_dev);
- /* from here on we're committed to teardown via dev_dax_release() */
dev_dax->dax_dev = dax_dev;
dev_dax->target_node = dax_region->target_node;
+ dev_dax->align = dax_region->align;
+ ida_init(&dev_dax->ida);
kref_get(&dax_region->kref);
inode = dax_inode(dax_dev);
if (rc)
return ERR_PTR(rc);
+ /* register mapping device for the initial allocation range */
+ if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
+ rc = devm_register_dax_mapping(dev_dax, 0);
+ if (rc)
+ return ERR_PTR(rc);
+ }
+
return dev_dax;
err_alloc_dax:
kfree(dev_dax->pgmap);
err_pgmap:
- free_dev_dax_range(dev_dax);
+ free_dev_dax_ranges(dev_dax);
err_range:
free_dev_dax_id(dev_dax);
err_id: