drivers/nvdimm/claim.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/sizes.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"

/*
 * Release a claimed namespace: callers must already hold the nvdimm bus
 * reconfig_mutex (see the lockdep assertion below).
 */
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
        struct nd_namespace_common *ndns = *_ndns;
        struct nvdimm_bus *nvdimm_bus;

        if (!ndns)
                return;

        nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
        dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
        ndns->claim = NULL;
        *_ndns = NULL;
        put_device(&ndns->dev);
}

/* Locked wrapper: pins the namespace across the lock/detach/unlock sequence */
void nd_detach_ndns(struct device *dev,
                struct nd_namespace_common **_ndns)
{
        struct nd_namespace_common *ndns = *_ndns;

        if (!ndns)
                return;
        get_device(&ndns->dev);
        nvdimm_bus_lock(&ndns->dev);
        __nd_detach_ndns(dev, _ndns);
        nvdimm_bus_unlock(&ndns->dev);
        put_device(&ndns->dev);
}

/*
 * Claim a namespace for @dev: returns false if another device got there
 * first. Callers must already hold the nvdimm bus reconfig_mutex.
 */
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
                struct nd_namespace_common **_ndns)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);

        if (attach->claim)
                return false;
        lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
        dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
        attach->claim = dev;
        *_ndns = attach;
        get_device(&attach->dev);
        return true;
}

bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
                struct nd_namespace_common **_ndns)
{
        bool claimed;

        nvdimm_bus_lock(&attach->dev);
        claimed = __nd_attach_ndns(dev, attach, _ndns);
        nvdimm_bus_unlock(&attach->dev);
        return claimed;
}

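/*
 * Example (not part of the original file): a minimal sketch of how a
 * claiming personality device pairs the attach/detach helpers above.
 * The example_* names and the btt_dev/tracked parameters are
 * hypothetical; the in-tree callers are the btt, pfn, and dax device
 * code.
 */
static int example_claim(struct device *btt_dev,
                struct nd_namespace_common *ndns,
                struct nd_namespace_common **tracked)
{
        /* takes the bus lock and records btt_dev as the claimant */
        if (!nd_attach_ndns(btt_dev, ndns, tracked))
                return -EBUSY; /* namespace already claimed elsewhere */
        return 0;
}

static void example_release(struct device *btt_dev,
                struct nd_namespace_common **tracked)
{
        /* clears ->claim and drops the reference taken at attach time */
        nd_detach_ndns(btt_dev, tracked);
}
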
static int namespace_match(struct device *dev, void *data)
{
        char *name = data;

        return strcmp(name, dev_name(dev)) == 0;
}

/*
 * A personality device is "idle" (safe to unregister) when it is not the
 * region's current seed device, has no namespace attached, and is not
 * bound to a driver.
 */
static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct device *seed = NULL;

        if (is_nd_btt(dev))
                seed = nd_region->btt_seed;
        else if (is_nd_pfn(dev))
                seed = nd_region->pfn_seed;
        else if (is_nd_dax(dev))
                seed = nd_region->dax_seed;

        if (seed == dev || ndns || dev->driver)
                return false;
        return true;
}

struct nd_pfn *to_nd_pfn_safe(struct device *dev)
{
        /*
         * pfn device attributes are re-used by dax device instances, so we
         * need to be careful to perform the correct device-to-nd_pfn
         * conversion.
         */
        if (is_nd_pfn(dev))
                return to_nd_pfn(dev);

        if (is_nd_dax(dev)) {
                struct nd_dax *nd_dax = to_nd_dax(dev);

                return &nd_dax->nd_pfn;
        }

        WARN_ON(1);
        return NULL;
}

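/*
 * For reference (not part of the original file): the conversion above
 * works because struct nd_dax embeds a struct nd_pfn as its first
 * member (see drivers/nvdimm/nd.h), so the pfn attribute code can
 * operate on a dax device through &nd_dax->nd_pfn:
 *
 *	struct nd_dax {
 *		struct nd_pfn nd_pfn;
 *	};
 */
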
static void nd_detach_and_reset(struct device *dev,
                struct nd_namespace_common **_ndns)
{
        /* detach the namespace and destroy / reset the device */
        __nd_detach_ndns(dev, _ndns);
        if (is_idle(dev, *_ndns)) {
                nd_device_unregister(dev, ND_ASYNC);
        } else if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                nd_btt->lbasize = 0;
                kfree(nd_btt->uuid);
                nd_btt->uuid = NULL;
        } else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
                struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

                kfree(nd_pfn->uuid);
                nd_pfn->uuid = NULL;
                nd_pfn->mode = PFN_MODE_NONE;
        }
}

ssize_t nd_namespace_store(struct device *dev,
                struct nd_namespace_common **_ndns, const char *buf,
                size_t len)
{
        struct nd_namespace_common *ndns;
        struct device *found;
        char *name;

        if (dev->driver) {
                dev_dbg(dev, "namespace already active\n");
                return -EBUSY;
        }

        name = kstrndup(buf, len, GFP_KERNEL);
        if (!name)
                return -ENOMEM;
        strim(name);

        if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
                /* pass */;
        else {
                len = -EINVAL;
                goto out;
        }

        ndns = *_ndns;
        if (strcmp(name, "") == 0) {
                nd_detach_and_reset(dev, _ndns);
                goto out;
        } else if (ndns) {
                dev_dbg(dev, "namespace already set to: %s\n",
                                dev_name(&ndns->dev));
                len = -EBUSY;
                goto out;
        }

        found = device_find_child(dev->parent, name, namespace_match);
        if (!found) {
                dev_dbg(dev, "'%s' not found under %s\n", name,
                                dev_name(dev->parent));
                len = -ENODEV;
                goto out;
        }

        ndns = to_ndns(found);

        switch (ndns->claim_class) {
        case NVDIMM_CCLASS_NONE:
                break;
        case NVDIMM_CCLASS_BTT:
        case NVDIMM_CCLASS_BTT2:
                if (!is_nd_btt(dev)) {
                        len = -EBUSY;
                        goto out_attach;
                }
                break;
        case NVDIMM_CCLASS_PFN:
                if (!is_nd_pfn(dev)) {
                        len = -EBUSY;
                        goto out_attach;
                }
                break;
        case NVDIMM_CCLASS_DAX:
                if (!is_nd_dax(dev)) {
                        len = -EBUSY;
                        goto out_attach;
                }
                break;
        default:
                len = -EBUSY;
                goto out_attach;
        }

        if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
                dev_dbg(dev, "%s too small to host\n", name);
                len = -ENXIO;
                goto out_attach;
        }

        WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
        if (!__nd_attach_ndns(dev, ndns, _ndns)) {
                dev_dbg(dev, "%s already claimed\n",
                                dev_name(&ndns->dev));
                len = -EBUSY;
        }

 out_attach:
        put_device(&ndns->dev); /* from device_find_child */
 out:
        kfree(name);
        return len;
}

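/*
 * Example (not part of the original file): a sketch of the sysfs wiring
 * that reaches nd_namespace_store(), modeled on the BTT namespace
 * attribute in drivers/nvdimm/btt_devs.c. Note the caller takes
 * nvdimm_bus_lock(), which is what the
 * WARN_ON_ONCE(!is_nvdimm_bus_locked(dev)) above relies on.
 */
static ssize_t example_namespace_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_btt *nd_btt = to_nd_btt(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
        nvdimm_bus_unlock(dev);

        return rc;
}
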
/*
 * nd_sb_checksum: compute checksum for a generic info block
 *
 * Returns a fletcher64 checksum of everything in the given info block
 * except the last field (since that's where the checksum lives).
 */
u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
{
        u64 sum;
        __le64 sum_save;

        BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
        BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
        BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);

        sum_save = nd_gen_sb->checksum;
        nd_gen_sb->checksum = 0;
        sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
        nd_gen_sb->checksum = sum_save;
        return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);

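/*
 * Example (not part of the original file): stamping and verifying an
 * info block with nd_sb_checksum(), in the style of the btt and pfn
 * superblock code. Because the helper zeroes the checksum field for
 * the duration of the sum and then restores it, the block can be
 * summed in place.
 */
static void example_stamp_checksum(struct btt_sb *btt_sb)
{
        btt_sb->checksum =
                cpu_to_le64(nd_sb_checksum((struct nd_gen_sb *) btt_sb));
}

static bool example_verify_checksum(struct btt_sb *btt_sb)
{
        return le64_to_cpu(btt_sb->checksum) ==
                nd_sb_checksum((struct nd_gen_sb *) btt_sb);
}
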
static int nsio_rw_bytes(struct nd_namespace_common *ndns,
                resource_size_t offset, void *buf, size_t size, int rw,
                unsigned long flags)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
        sector_t sector = offset >> 9;
        int rc = 0, ret = 0;

        if (unlikely(!size))
                return 0;

        if (unlikely(offset + size > nsio->size)) {
                dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
                return -EFAULT;
        }

        if (rw == READ) {
                if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
                        return -EIO;
                if (copy_mc_to_kernel(buf, nsio->addr + offset, size) != 0)
                        return -EIO;
                return 0;
        }

        /*
         * Writes can opportunistically clear known-poisoned sectors, but
         * only for 512-byte-aligned I/O outside of atomic context.
         */
        if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
                if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
                                && !(flags & NVDIMM_IO_ATOMIC)) {
                        long cleared;

                        might_sleep();
                        cleared = nvdimm_clear_poison(&ndns->dev,
                                        nsio->res.start + offset, size);
                        if (cleared < size)
                                rc = -EIO;
                        if (cleared > 0 && cleared / 512) {
                                cleared /= 512;
                                badblocks_clear(&nsio->bb, sector, cleared);
                        }
                        arch_invalidate_pmem(nsio->addr + offset, size);
                } else
                        rc = -EIO;
        }

        memcpy_flushcache(nsio->addr + offset, buf, size);
        ret = nvdimm_flush(to_nd_region(ndns->dev.parent), NULL);
        if (ret)
                rc = ret;

        return rc;
}

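/*
 * Example (not part of the original file): consumers reach
 * nsio_rw_bytes() through the ->rw_bytes() hook installed by
 * devm_nsio_enable() below, typically via the nvdimm_read_bytes() /
 * nvdimm_write_bytes() wrappers in <linux/nd.h>. The offset used here
 * is illustrative.
 */
static int example_read_info_block(struct nd_namespace_common *ndns,
                struct btt_sb *btt_sb)
{
        /* synchronous byte-granularity read; -EIO if the range is poisoned */
        return nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb), 0);
}
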
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
                resource_size_t size)
{
        struct nd_namespace_common *ndns = &nsio->common;
        struct range range = {
                .start = nsio->res.start,
                .end = nsio->res.end,
        };

        nsio->size = size;
        if (!devm_request_mem_region(dev, range.start, size,
                                dev_name(&ndns->dev))) {
                dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
                return -EBUSY;
        }

        ndns->rw_bytes = nsio_rw_bytes;
        if (devm_init_badblocks(dev, &nsio->bb))
                return -ENOMEM;
        nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
                        &range);

        nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);

        return PTR_ERR_OR_ZERO(nsio->addr);
}

void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
        struct resource *res = &nsio->res;

        devm_memunmap(dev, nsio->addr);
        devm_exit_badblocks(dev, &nsio->bb);
        devm_release_mem_region(dev, res->start, nsio->size);
}
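
/*
 * Example (not part of the original file): a sketch of how a driver
 * probe path might pair the two helpers above, in the spirit of the
 * devm_namespace_enable()/devm_namespace_disable() wrappers used by
 * the pmem and btt drivers. The example_* name and the mapped size are
 * hypothetical; devm unwinds the enable automatically on driver
 * detach, so the explicit disable is only needed when dropping the raw
 * mapping early.
 */
static int example_probe_io(struct device *dev, struct nd_namespace_io *nsio)
{
        int rc;

        /* map the namespace and install ->rw_bytes for info-block I/O */
        rc = devm_nsio_enable(dev, nsio, resource_size(&nsio->res));
        if (rc)
                return rc;

        /* ... read and validate info blocks via nvdimm_read_bytes() ... */

        /* drop the raw mapping before setting up a different one */
        devm_nsio_disable(dev, nsio);
        return 0;
}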