/* drivers/nvdimm/pmem.c */
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
#include "nd-core.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

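/*
 * Ask the bus provider to clear poison over [offset, offset + len), then
 * drop any now-valid sectors from the bad-block list and invalidate the
 * CPU cache for the cleared range.  Returns BLK_STS_IOERR if fewer than
 * 'len' bytes could be cleared.
 */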
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

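/*
 * Copy from a (possibly compound) page into pmem using
 * memcpy_flushcache() so the stores are pushed toward the persistence
 * domain.  kmap_atomic() can only map one page at a time, so a
 * multi-page source is walked in at most PAGE_SIZE chunks.
 */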
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		/* cap the chunk at the page boundary when off != 0 */
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

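/*
 * Copy from pmem into a (possibly compound) page with the machine-check
 * safe memcpy, so consumed poison surfaces as BLK_STS_IOERR instead of a
 * fatal machine check.
 */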
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		/* cap the chunk at the page boundary when off != 0 */
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

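/*
 * Service one bio_vec segment: map the sector to a pmem offset, fail
 * reads that overlap known bad blocks, and on writes try to clear the
 * poison (writing the data both before and after the clear, see below).
 */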
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, unsigned int op,
			sector_t sector)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!op_is_write(op)) {
		if (unlikely(bad_pmem))
			rc = BLK_STS_IOERR;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

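/*
 * bio submission entry point.  pmem completes I/O synchronously, so each
 * segment is copied inline; REQ_PREFLUSH and REQ_FUA are honored with
 * nvdimm_flush() before and after the data transfer, respectively.
 */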
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_op(bio), iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

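/* Synchronous ->rw_page() path for a single (possibly huge) page. */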
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	blk_status_t rc;

	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
			  0, op, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}

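/*
 * Resolve a page offset in the device to a kernel virtual address and
 * pfn for DAX.  Returns the number of contiguous pages available at
 * *kaddr, or -EIO if the requested range overlaps known bad blocks.
 */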
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

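/*
 * dax copy routines: writes go through the flushcache variant so stores
 * reach the persistence domain, reads go through the machine-check safe
 * variant so poison is survivable.
 */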
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_to_iter_mcsafe(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

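/*
 * devm-managed teardown helpers; devres runs these in reverse order of
 * registration when the device is released.
 */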
static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_freeze_queue(void *q)
{
	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

static void pmem_release_pgmap_ops(void *__pgmap)
{
	dev_pagemap_put_ops();
}

static void fsdax_pagefree(struct page *page, void *data)
{
	wake_up_var(&page->_refcount);
}

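/*
 * Tag the pagemap as fsdax-managed and hook ->page_free so that dropping
 * the last reference to a pmem page wakes anyone waiting on its refcount
 * (see fsdax_pagefree() above).
 */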
static int setup_pagemap_fsdax(struct device *dev, struct dev_pagemap *pgmap)
{
	dev_pagemap_get_ops();
	if (devm_add_action_or_reset(dev, pmem_release_pgmap_ops, pgmap))
		return -ENOMEM;
	pgmap->type = MEMORY_DEVICE_FS_DAX;
	pgmap->page_free = fsdax_pagefree;

	return 0;
}

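/*
 * Main setup path: memremap the namespace (with a struct page memmap in
 * the pfn and fsdax cases), allocate the request queue and gendisk,
 * populate bad blocks, and register the dax device.
 */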
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct resource bb_res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL);
	if (!q)
		return -ENOMEM;

	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
			return -ENOMEM;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			resource_size(&pmem->pgmap.res);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
		bb_res.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
		pmem->pgmap.altmap_valid = false;
		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
			return -ENOMEM;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be frozen before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops		= &pmem_fops;
	disk->queue		= q;
	disk->flags		= GENHD_FL_EXT_DEVT;
	disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;

	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

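/*
 * A namespace may carry a btt, pfn, or dax info block that gives the
 * device a different personality on re-probe; otherwise it attaches as a
 * raw pmem disk.
 */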
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not race
		 * nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

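/*
 * On NVDIMM_REVALIDATE_POISON, re-read the poison list for the backing
 * namespace (adjusted for any pfn metadata offset and truncation) and
 * notify the sysfs 'badblocks' file so userspace can re-read it.
 */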
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");