1 // SPDX-License-Identifier: GPL-2.0
3 * dcssblk.c -- the S/390 block driver for dcss memory
5 * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
8 #define KMSG_COMPONENT "dcssblk"
9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/ctype.h>
14 #include <linux/errno.h>
15 #include <linux/init.h>
16 #include <linux/slab.h>
17 #include <linux/blkdev.h>
18 #include <linux/completion.h>
19 #include <linux/interrupt.h>
20 #include <linux/pfn_t.h>
21 #include <linux/uio.h>
22 #include <linux/dax.h>
23 #include <asm/extmem.h>
26 #define DCSSBLK_NAME "dcssblk"
27 #define DCSSBLK_MINORS_PER_DISK 1
28 #define DCSSBLK_PARM_LEN 400
29 #define DCSS_BUS_ID_SIZE 20
31 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
32 static void dcssblk_release(struct gendisk *disk, fmode_t mode);
33 static void dcssblk_submit_bio(struct bio *bio);
34 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
35 long nr_pages, void **kaddr, pfn_t *pfn);
37 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
39 static int dcssblk_major;
/*
 * Block device operations: this is a bio-based driver (.submit_bio),
 * with no request queue.  NOTE(review): source view is elided; the
 * .owner and .open initializers are presumably in the missing lines.
 */
40 static const struct block_device_operations dcssblk_devops = {
42 .submit_bio = dcssblk_submit_bio,
44 .release = dcssblk_release,
/*
 * DAX op: copy @bytes from the iov_iter @i into DCSS memory at @addr.
 * @addr is already a directly-addressable kernel pointer into the
 * segment, so a plain copy_from_iter() suffices; dax_dev/pgoff unused.
 */
47 static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
48 pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
50 return copy_from_iter(addr, bytes, i);
/*
 * DAX op: copy @bytes from DCSS memory at @addr out to the iov_iter @i.
 * Mirror image of dcssblk_dax_copy_from_iter(); dax_dev/pgoff unused.
 */
53 static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
54 pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
56 return copy_to_iter(addr, bytes, i);
/*
 * DAX op: zero @nr_pages pages starting at page offset @pgoff.
 * Resolves the kernel address with dax_direct_access(), then memsets
 * and flushes the whole range.
 */
59 static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
60 pgoff_t pgoff, size_t nr_pages)
65 rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
/*
 * NOTE(review): the error check on rc (and the kaddr/rc declarations)
 * sit in lines elided from this view; memset below assumes success.
 */
68 memset(kaddr, 0, nr_pages << PAGE_SHIFT);
69 dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
/* DAX operations table wiring the helpers above into the dax core. */
73 static const struct dax_operations dcssblk_dax_ops = {
74 .direct_access = dcssblk_dax_direct_access,
75 .dax_supported = generic_fsdax_supported,
76 .copy_from_iter = dcssblk_dax_copy_from_iter,
77 .copy_to_iter = dcssblk_dax_copy_to_iter,
78 .zero_page_range = dcssblk_dax_zero_page_range,
/*
 * Per block-device state.  One dcssblk_dev_info represents a gendisk
 * backed by one or more contiguous DCSS segments (seg_list).
 * NOTE(review): many members (lh, dev, gd, start/end, segment_type,
 * num_of_segments, use_count) are in lines elided from this view but
 * are referenced throughout the functions below.
 */
81 struct dcssblk_dev_info {
84 char segment_name[DCSS_BUS_ID_SIZE];
90 unsigned char save_pending;
91 unsigned char is_shared;
93 struct list_head seg_list;
94 struct dax_device *dax_dev;
/*
 * NOTE(review): the line below (original line 99) belongs to the
 * following struct segment_info, whose opening is elided here.
 */
99 char segment_name[DCSS_BUS_ID_SIZE];
105 static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf,
107 static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf,
110 static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
111 static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
113 static struct device *dcssblk_root_dev;
115 static LIST_HEAD(dcssblk_devices);
116 static struct rw_semaphore dcssblk_devices_sem;
119 * release function for segment device.
122 dcssblk_release_segment(struct device *dev)
124 struct dcssblk_dev_info *dev_info;
125 struct segment_info *entry, *temp;
/* Recover our per-device state from the embedded struct device. */
127 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
/*
 * Tear down every segment on the device's list; _safe variant because
 * entries are unlinked (and presumably kfree'd in elided lines) while
 * iterating.
 */
128 list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) {
129 list_del(&entry->lh);
/* Drop the module reference taken when the device was added. */
133 module_put(THIS_MODULE);
137 * get a minor number. needs to be called with
138 * down_write(&dcssblk_devices_sem) and the
139 * device needs to be enqueued before the semaphore is
143 dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
146 struct dcssblk_dev_info *entry;
/* Defensive: caller must pass a real device (error return is elided). */
148 if (dev_info == NULL)
/*
 * Linear scan over the whole minor space: pick the first minor not
 * already claimed by a device on dcssblk_devices.  Caller holds
 * down_write(&dcssblk_devices_sem), per the comment above.
 */
150 for (minor = 0; minor < (1<<MINORBITS); minor++) {
152 // test if minor available
153 list_for_each_entry(entry, &dcssblk_devices, lh)
154 if (minor == entry->gd->first_minor)
156 if (!found) break; // got unused minor
/* NOTE(review): the "all minors used" error path is in elided lines. */
160 dev_info->gd->first_minor = minor;
165 * get the struct dcssblk_dev_info from dcssblk_devices
166 * for the given name.
167 * down_read(&dcssblk_devices_sem) must be held.
169 static struct dcssblk_dev_info *
/*
 * Look up a device by its (uppercased) segment name.  Returns the
 * matching dcssblk_dev_info or (in elided lines) NULL when not found.
 * Caller must hold dcssblk_devices_sem at least for reading.
 */
170 dcssblk_get_device_by_name(char *name)
172 struct dcssblk_dev_info *entry;
174 list_for_each_entry(entry, &dcssblk_devices, lh) {
175 if (!strcmp(name, entry->segment_name)) {
183 * get the struct segment_info from seg_list
184 * for the given name.
185 * down_read(&dcssblk_devices_sem) must be held.
187 static struct segment_info *
/*
 * Search every device's seg_list for a segment with the given name.
 * Returns the segment_info or (in elided lines) NULL if absent.
 * Caller must hold dcssblk_devices_sem at least for reading.
 */
188 dcssblk_get_segment_by_name(char *name)
190 struct dcssblk_dev_info *dev_info;
191 struct segment_info *entry;
193 list_for_each_entry(dev_info, &dcssblk_devices, lh) {
194 list_for_each_entry(entry, &dev_info->seg_list, lh) {
195 if (!strcmp(name, entry->segment_name))
203 * get the highest address of the multi-segment block.
206 dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
208 unsigned long highest_addr;
209 struct segment_info *entry;
/*
 * Simple max-scan over the device's segments.  NOTE(review): the
 * initialization of highest_addr (presumably 0) is in elided lines.
 */
212 list_for_each_entry(entry, &dev_info->seg_list, lh) {
213 if (highest_addr < entry->end)
214 highest_addr = entry->end;
220 * get the lowest address of the multi-segment block.
223 dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
226 unsigned long lowest_addr;
227 struct segment_info *entry;
/*
 * Min-scan with a "first element seen" flag (set_first, declared in
 * elided lines) so the first segment's start seeds lowest_addr.
 */
231 list_for_each_entry(entry, &dev_info->seg_list, lh) {
232 if (set_first == 0) {
233 lowest_addr = entry->start;
236 if (lowest_addr > entry->start)
237 lowest_addr = entry->start;
244 * Check continuity of segments.
247 dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
250 struct segment_info *sort_list, *entry, temp;
/* A single segment is trivially continuous — nothing to check. */
252 if (dev_info->num_of_segments <= 1)
/* Snapshot the segment list into a flat array we can sort. */
255 sort_list = kcalloc(dev_info->num_of_segments,
256 sizeof(struct segment_info),
258 if (sort_list == NULL)
261 list_for_each_entry(entry, &dev_info->seg_list, lh) {
262 memcpy(&sort_list[i], entry, sizeof(struct segment_info));
/*
 * O(n^2) exchange sort by ascending start address; fine for the tiny
 * segment counts involved (names are parsed from a 400-byte param).
 */
267 for (i = 0; i < dev_info->num_of_segments; i++)
268 for (j = 0; j < dev_info->num_of_segments; j++)
269 if (sort_list[j].start > sort_list[i].start) {
270 memcpy(&temp, &sort_list[i],
271 sizeof(struct segment_info));
272 memcpy(&sort_list[i], &sort_list[j],
273 sizeof(struct segment_info));
274 memcpy(&sort_list[j], &temp,
275 sizeof(struct segment_info));
278 /* check continuity */
279 for (i = 0; i < dev_info->num_of_segments - 1; i++) {
/* Each segment must end exactly one byte before the next one starts. */
280 if ((sort_list[i].end + 1) != sort_list[i+1].start) {
281 pr_err("Adjacent DCSSs %s and %s are not "
282 "contiguous\n", sort_list[i].segment_name,
283 sort_list[i+1].segment_name);
287 /* EN and EW are allowed in a block device */
/*
 * Mixed types are tolerated only when both segments are exclusive
 * and writable (i.e. neither is shared nor exclusive-read-only).
 */
288 if (sort_list[i].segment_type != sort_list[i+1].segment_type) {
289 if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) ||
290 (sort_list[i].segment_type == SEG_TYPE_ER) ||
291 !(sort_list[i+1].segment_type &
292 SEGMENT_EXCLUSIVE) ||
293 (sort_list[i+1].segment_type == SEG_TYPE_ER)) {
294 pr_err("DCSS %s and DCSS %s have "
295 "incompatible types\n",
296 sort_list[i].segment_name,
297 sort_list[i+1].segment_name);
313 dcssblk_load_segment(char *name, struct segment_info **seg_info)
317 /* already loaded? */
318 down_read(&dcssblk_devices_sem);
319 *seg_info = dcssblk_get_segment_by_name(name);
320 up_read(&dcssblk_devices_sem);
/* If the segment is already mapped to some device, refuse a reload. */
321 if (*seg_info != NULL)
324 /* get a struct segment_info */
325 *seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL);
326 if (*seg_info == NULL)
329 strcpy((*seg_info)->segment_name, name);
331 /* load the segment */
/* Initially loaded shared; dcssblk_shared_store can switch it later. */
332 rc = segment_load(name, SEGMENT_SHARED,
333 &(*seg_info)->start, &(*seg_info)->end);
/* NOTE(review): the rc < 0 check is elided; warn + free path follows. */
335 segment_warning(rc, (*seg_info)->segment_name);
338 INIT_LIST_HEAD(&(*seg_info)->lh);
/* On success segment_load() returns the segment type — record it. */
339 (*seg_info)->segment_type = rc;
345 * device attribute for switching shared/nonshared (exclusive)
346 * operation (show + store)
/* sysfs "shared" read: "1\n" if loaded shared, "0\n" if exclusive. */
349 dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
351 struct dcssblk_dev_info *dev_info;
353 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
354 return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
/*
 * sysfs "shared" write: '1' reloads all segments shared, '0' reloads
 * them exclusive-writable.  On a failed mode change the device is
 * removed entirely (see the error path at the bottom).
 */
358 dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
360 struct dcssblk_dev_info *dev_info;
361 struct segment_info *entry, *temp;
/* Accept exactly one character of payload ("0"/"1", optional newline). */
364 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
366 down_write(&dcssblk_devices_sem);
367 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
/* Refuse to switch modes while the block device is open. */
368 if (atomic_read(&dev_info->use_count)) {
372 if (inbuf[0] == '1') {
373 /* reload segments in shared mode */
374 list_for_each_entry(entry, &dev_info->seg_list, lh) {
375 rc = segment_modify_shared(entry->segment_name,
/* -EINVAL here would mean our own bookkeeping is corrupt. */
378 BUG_ON(rc == -EINVAL);
383 dev_info->is_shared = 1;
/* Shared read-only segment types force the disk read-only. */
384 switch (dev_info->segment_type) {
388 set_disk_ro(dev_info->gd, 1);
390 } else if (inbuf[0] == '0') {
391 /* reload segments in exclusive mode */
/* SC segments can never be made exclusive-writable. */
392 if (dev_info->segment_type == SEG_TYPE_SC) {
393 pr_err("DCSS %s is of type SC and cannot be "
394 "loaded as exclusive-writable\n",
395 dev_info->segment_name);
399 list_for_each_entry(entry, &dev_info->seg_list, lh) {
400 rc = segment_modify_shared(entry->segment_name,
403 BUG_ON(rc == -EINVAL);
408 dev_info->is_shared = 0;
409 set_disk_ro(dev_info->gd, 0);
/*
 * Error path (label elided): a failed segment_modify_shared() leaves
 * the device unusable, so unload everything and unregister it.
 */
418 pr_err("DCSS device %s is removed after a failed access mode "
419 "change\n", dev_info->segment_name);
421 list_for_each_entry(entry, &dev_info->seg_list, lh) {
423 segment_unload(entry->segment_name);
425 list_del(&dev_info->lh);
427 kill_dax(dev_info->dax_dev);
428 put_dax(dev_info->dax_dev);
429 del_gendisk(dev_info->gd);
430 blk_cleanup_disk(dev_info->gd);
/* Drop the semaphore before self-removal to avoid deadlocking sysfs. */
431 up_write(&dcssblk_devices_sem);
433 if (device_remove_file_self(dev, attr)) {
434 device_unregister(dev);
439 up_write(&dcssblk_devices_sem);
442 static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
443 dcssblk_shared_store);
446 * device attribute for save operation on current copy
447 * of the segment. If the segment is busy, saving will
448 * become pending until it gets released, which can be
449 * undone by storing a non-true value to this entry.
/* sysfs "save" read: "1\n" when a save is pending, else "0\n". */
453 dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf)
455 struct dcssblk_dev_info *dev_info;
457 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
458 return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
/*
 * sysfs "save" write: '1' saves all segments now (or marks the save
 * pending if the device is busy); '0' cancels a pending save.
 */
462 dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
464 struct dcssblk_dev_info *dev_info;
465 struct segment_info *entry;
/* Accept exactly one character of payload ("0"/"1", optional newline). */
467 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
469 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
471 down_write(&dcssblk_devices_sem);
472 if (inbuf[0] == '1') {
473 if (atomic_read(&dev_info->use_count) == 0) {
474 // device is idle => we save immediately
475 pr_info("All DCSSs that map to device %s are "
476 "saved\n", dev_info->segment_name);
477 list_for_each_entry(entry, &dev_info->seg_list, lh) {
/* SN/EN segments have no saved copy — warn and skip (else is elided). */
478 if (entry->segment_type == SEG_TYPE_EN ||
479 entry->segment_type == SEG_TYPE_SN)
480 pr_warn("DCSS %s is of type SN or EN"
481 " and cannot be saved\n",
482 entry->segment_name);
484 segment_save(entry->segment_name);
487 // device is busy => we save it when it becomes
488 // idle in dcssblk_release
489 pr_info("Device %s is in use, its DCSSs will be "
490 "saved when it becomes idle\n",
491 dev_info->segment_name);
492 dev_info->save_pending = 1;
494 } else if (inbuf[0] == '0') {
495 if (dev_info->save_pending) {
496 // device is busy & the user wants to undo his save
498 dev_info->save_pending = 0;
499 pr_info("A pending save request for device %s "
500 "has been canceled\n",
501 dev_info->segment_name);
/* Any other input: fall through (invalid-argument return is elided). */
504 up_write(&dcssblk_devices_sem);
507 up_write(&dcssblk_devices_sem);
510 static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
514 * device attribute for showing all segments in a device
/*
 * sysfs "seglist" read: concatenate the names of all segments backing
 * this device into @buf (separator/terminator handling is in elided
 * lines), returning the number of bytes written.
 */
517 dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
522 struct dcssblk_dev_info *dev_info;
523 struct segment_info *entry;
525 down_read(&dcssblk_devices_sem);
526 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
529 list_for_each_entry(entry, &dev_info->seg_list, lh) {
530 strcpy(&buf[i], entry->segment_name);
531 i += strlen(entry->segment_name);
535 up_read(&dcssblk_devices_sem);
538 static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
/*
 * Per-device sysfs attributes (shared/save/seglist) grouped so they
 * are created automatically via dev.groups at device_register() time.
 * NOTE(review): the &dev_attr_save.attr entry is presumably in the
 * elided line between "shared" and "seglist".
 */
540 static struct attribute *dcssblk_dev_attrs[] = {
541 &dev_attr_shared.attr,
543 &dev_attr_seglist.attr,
546 static struct attribute_group dcssblk_dev_attr_group = {
547 .attrs = dcssblk_dev_attrs,
549 static const struct attribute_group *dcssblk_dev_attr_groups[] = {
550 &dcssblk_dev_attr_group,
555 * device attribute for adding devices
/*
 * sysfs "add" write on the root device: parse a colon-separated list
 * of up-to-8-character DCSS names, load each segment, verify the set
 * is contiguous, then create the gendisk + DAX device and register it.
 */
558 dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
560 int rc, i, j, num_of_segments;
561 struct dcssblk_dev_info *dev_info;
562 struct segment_info *seg_info, *temp;
564 unsigned long seg_byte_size;
/* Only the dcssblk root device accepts "add" writes. */
568 if (dev != dcssblk_root_dev) {
572 if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) {
577 local_buf = kmalloc(count + 1, GFP_KERNEL);
578 if (local_buf == NULL) {
/*
 * Outer loop walks segment names; inner loop copies one name (up to
 * the elided ':' separator check) into local_buf, uppercased.
 */
587 for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
588 for (j = i; j < count &&
591 (buf[j] != '\n'); j++) {
592 local_buf[j-i] = toupper(buf[j]);
594 local_buf[j-i] = '\0';
/* DCSS names are 1..8 characters. */
595 if (((j - i) == 0) || ((j - i) > 8)) {
600 rc = dcssblk_load_segment(local_buf, &seg_info);
604 * get a struct dcssblk_dev_info
606 if (num_of_segments == 0) {
607 dev_info = kzalloc(sizeof(struct dcssblk_dev_info),
609 if (dev_info == NULL) {
/* First segment's name and type name/type the whole device. */
613 strcpy(dev_info->segment_name, local_buf);
614 dev_info->segment_type = seg_info->segment_type;
615 INIT_LIST_HEAD(&dev_info->seg_list);
617 list_add_tail(&seg_info->lh, &dev_info->seg_list);
621 if ((buf[j] == '\0') || (buf[j] == '\n'))
625 /* no trailing colon at the end of the input */
626 if ((i > 0) && (buf[i-1] == ':')) {
/* Reuse local_buf as the full (multi-segment) device name. */
630 strlcpy(local_buf, buf, i + 1);
631 dev_info->num_of_segments = num_of_segments;
632 rc = dcssblk_is_continuous(dev_info);
/* Device address range spans from lowest to highest segment address. */
636 dev_info->start = dcssblk_find_lowest_addr(dev_info);
637 dev_info->end = dcssblk_find_highest_addr(dev_info);
639 dev_set_name(&dev_info->dev, "%s", dev_info->segment_name);
640 dev_info->dev.release = dcssblk_release_segment;
641 dev_info->dev.groups = dcssblk_dev_attr_groups;
642 INIT_LIST_HEAD(&dev_info->lh);
643 dev_info->gd = blk_alloc_disk(NUMA_NO_NODE);
644 if (dev_info->gd == NULL) {
648 dev_info->gd->major = dcssblk_major;
649 dev_info->gd->minors = DCSSBLK_MINORS_PER_DISK;
650 dev_info->gd->fops = &dcssblk_devops;
651 dev_info->gd->private_data = dev_info;
/* 4 KiB logical blocks; DAX capable. */
652 blk_queue_logical_block_size(dev_info->gd->queue, 4096);
653 blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
655 seg_byte_size = (dev_info->end - dev_info->start + 1);
656 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
657 pr_info("Loaded %s with total size %lu bytes and capacity %lu "
658 "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);
/* Segments were loaded SEGMENT_SHARED in dcssblk_load_segment(). */
660 dev_info->save_pending = 0;
661 dev_info->is_shared = 1;
662 dev_info->dev.parent = dcssblk_root_dev;
665 *get minor, add to list
667 down_write(&dcssblk_devices_sem);
/* Reject a duplicate registration under the same name. */
668 if (dcssblk_get_segment_by_name(local_buf)) {
672 rc = dcssblk_assign_free_minor(dev_info);
675 sprintf(dev_info->gd->disk_name, "dcssblk%d",
676 dev_info->gd->first_minor);
677 list_add_tail(&dev_info->lh, &dcssblk_devices);
/* Pin the module while the device exists (dropped in release_segment). */
679 if (!try_module_get(THIS_MODULE)) {
684 * register the device
686 rc = device_register(&dev_info->dev);
690 dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name,
691 &dcssblk_dax_ops, DAXDEV_F_SYNC);
692 if (IS_ERR(dev_info->dax_dev)) {
693 rc = PTR_ERR(dev_info->dax_dev);
694 dev_info->dax_dev = NULL;
698 get_device(&dev_info->dev);
699 rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
/* Shared read-only segment types make the disk read-only. */
703 switch (dev_info->segment_type) {
707 set_disk_ro(dev_info->gd,1);
710 set_disk_ro(dev_info->gd,0);
713 up_write(&dcssblk_devices_sem);
/*
 * Error unwind (labels elided): undo in reverse order — disk ref,
 * dax device, list entry, gendisk, loaded segments, dev_info.
 */
718 put_device(&dev_info->dev);
719 kill_dax(dev_info->dax_dev);
720 put_dax(dev_info->dax_dev);
722 list_del(&dev_info->lh);
723 blk_cleanup_disk(dev_info->gd);
724 list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
725 segment_unload(seg_info->segment_name);
727 put_device(&dev_info->dev);
728 up_write(&dcssblk_devices_sem);
731 list_del(&dev_info->lh);
733 blk_cleanup_disk(dev_info->gd);
734 up_write(&dcssblk_devices_sem);
736 if (dev_info == NULL)
738 list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) {
739 list_del(&seg_info->lh);
740 segment_unload(seg_info->segment_name);
751 * device attribute for removing devices
/*
 * sysfs "remove" write on the root device: look up the named device,
 * refuse if it is open, otherwise tear down disk + DAX, unload all
 * backing segments, and unregister the struct device.
 */
754 dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
756 struct dcssblk_dev_info *dev_info;
757 struct segment_info *entry;
/* Only the dcssblk root device accepts "remove" writes. */
761 if (dev != dcssblk_root_dev) {
764 local_buf = kmalloc(count + 1, GFP_KERNEL);
765 if (local_buf == NULL) {
/* Uppercase the name; stop at NUL or newline. */
771 for (i = 0; (i < count && (*(buf+i)!='\0') && (*(buf+i)!='\n')); i++) {
772 local_buf[i] = toupper(buf[i]);
/* DCSS names are 1..8 characters. */
775 if ((i == 0) || (i > 8)) {
780 down_write(&dcssblk_devices_sem);
781 dev_info = dcssblk_get_device_by_name(local_buf);
782 if (dev_info == NULL) {
783 up_write(&dcssblk_devices_sem);
784 pr_warn("Device %s cannot be removed because it is not a known device\n",
/* Cannot remove a device that is still open. */
789 if (atomic_read(&dev_info->use_count) != 0) {
790 up_write(&dcssblk_devices_sem);
791 pr_warn("Device %s cannot be removed while it is in use\n",
797 list_del(&dev_info->lh);
798 kill_dax(dev_info->dax_dev);
799 put_dax(dev_info->dax_dev);
800 del_gendisk(dev_info->gd);
801 blk_cleanup_disk(dev_info->gd);
803 /* unload all related segments */
804 list_for_each_entry(entry, &dev_info->seg_list, lh)
805 segment_unload(entry->segment_name);
807 up_write(&dcssblk_devices_sem);
/* Drops the sysfs ref, then the extra ref taken in add_store. */
809 device_unregister(&dev_info->dev);
810 put_device(&dev_info->dev);
/*
 * Block device open: bump use_count so mode switches and removal are
 * refused while the device is held open.
 */
819 dcssblk_open(struct block_device *bdev, fmode_t mode)
821 struct dcssblk_dev_info *dev_info;
824 dev_info = bdev->bd_disk->private_data;
/* private_data should always be set; error return is in elided lines. */
825 if (NULL == dev_info) {
829 atomic_inc(&dev_info->use_count);
/*
 * Block device release: on the last close, perform any save that was
 * deferred by dcssblk_save_store() while the device was busy.
 */
836 dcssblk_release(struct gendisk *disk, fmode_t mode)
838 struct dcssblk_dev_info *dev_info = disk->private_data;
839 struct segment_info *entry;
845 down_write(&dcssblk_devices_sem);
/* Last opener gone AND a save was queued => save now. */
846 if (atomic_dec_and_test(&dev_info->use_count)
847 && (dev_info->save_pending)) {
848 pr_info("Device %s has become idle and is being saved "
849 "now\n", dev_info->segment_name);
850 list_for_each_entry(entry, &dev_info->seg_list, lh) {
/* SN/EN segments have no saved copy — warn and skip (else is elided). */
851 if (entry->segment_type == SEG_TYPE_EN ||
852 entry->segment_type == SEG_TYPE_SN)
853 pr_warn("DCSS %s is of type SN or EN and cannot"
854 " be saved\n", entry->segment_name);
856 segment_save(entry->segment_name);
858 dev_info->save_pending = 0;
860 up_write(&dcssblk_devices_sem);
/*
 * bio handler: DCSS memory is directly addressable, so I/O is just a
 * memcpy between the bio pages and the segment address range.  Only
 * whole, page-aligned requests are accepted.
 */
864 dcssblk_submit_bio(struct bio *bio)
866 struct dcssblk_dev_info *dev_info;
868 struct bvec_iter iter;
870 unsigned long page_addr;
871 unsigned long source_addr;
872 unsigned long bytes_done;
874 blk_queue_split(&bio);
877 dev_info = bio->bi_bdev->bd_disk->private_data;
878 if (dev_info == NULL)
/* Sector (512B units) must be 8-aligned and size a 4 KiB multiple. */
880 if ((bio->bi_iter.bi_sector & 7) != 0 ||
881 (bio->bi_iter.bi_size & 4095) != 0)
882 /* Request is not page-aligned. */
884 /* verify data transfer direction */
885 if (dev_info->is_shared) {
886 switch (dev_info->segment_type) {
890 /* cannot write to these segments */
891 if (bio_data_dir(bio) == WRITE) {
892 pr_warn("Writing to %s failed because it is a read-only device\n",
893 dev_name(&dev_info->dev));
/* Convert the 512-byte sector index to a 4 KiB page index. */
899 index = (bio->bi_iter.bi_sector >> 3);
900 bio_for_each_segment(bvec, bio, iter) {
901 page_addr = (unsigned long)bvec_virt(&bvec);
902 source_addr = dev_info->start + (index<<12) + bytes_done;
/* Each bio_vec must itself be page-aligned in address and length. */
903 if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
906 if (bio_data_dir(bio) == READ) {
907 memcpy((void*)page_addr, (void*)source_addr,
910 memcpy((void*)source_addr, (void*)page_addr,
913 bytes_done += bvec.bv_len;
/*
 * Core of DAX direct access: translate a page offset into the kernel
 * virtual address (and pfn) inside the DCSS range, and return how many
 * pages remain from that offset to the end of the device.
 */
922 __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
923 long nr_pages, void **kaddr, pfn_t *pfn)
925 resource_size_t offset = pgoff * PAGE_SIZE;
926 unsigned long dev_sz;
928 dev_sz = dev_info->end - dev_info->start + 1;
/* NOTE(review): NULL checks on kaddr/pfn are presumably in elided lines. */
930 *kaddr = (void *) dev_info->start + offset;
/* PFN_SPECIAL: no struct page backs DCSS memory. */
932 *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
933 PFN_DEV|PFN_SPECIAL);
935 return (dev_sz - offset) / PAGE_SIZE;
/*
 * DAX op wrapper: recover dev_info from the dax_device's private data
 * and delegate to __dcssblk_direct_access().
 */
939 dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
940 long nr_pages, void **kaddr, pfn_t *pfn)
942 struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);
944 return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn);
/*
 * Parse the "segments=" module parameter at init time: for each
 * comma-separated set, call dcssblk_add_store() with the set's names;
 * a trailing "(local)" additionally switches the new device to
 * exclusive mode via dcssblk_shared_store("0").
 */
948 dcssblk_check_params(void)
951 char buf[DCSSBLK_PARM_LEN + 1];
952 struct dcssblk_dev_info *dev_info;
/* Outer loop: one comma-separated device set per iteration. */
954 for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
956 for (j = i; (j < DCSSBLK_PARM_LEN) &&
957 (dcssblk_segments[j] != ',') &&
958 (dcssblk_segments[j] != '\0') &&
959 (dcssblk_segments[j] != '('); j++)
961 buf[j-i] = dcssblk_segments[j];
/* Feed the set to the same handler the sysfs "add" file uses. */
964 rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i);
965 if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
/* Uppercase the device name (only up to the first ':') for lookup. */
966 for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++)
967 buf[k] = toupper(buf[k]);
969 if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
970 down_read(&dcssblk_devices_sem);
971 dev_info = dcssblk_get_device_by_name(buf);
972 up_read(&dcssblk_devices_sem);
/* NOTE(review): the dev_info != NULL check is in elided lines. */
974 dcssblk_shared_store(&dev_info->dev,
/* Skip any remainder of this set up to the next comma. */
978 while ((dcssblk_segments[j] != ',') &&
979 (dcssblk_segments[j] != '\0'))
983 if (dcssblk_segments[j] == '\0')
990 * The init/exit functions.
/*
 * NOTE(review): body fragment of the module exit function (its
 * signature is elided): unregister the root device and give back the
 * dynamically allocated block major.
 */
995 root_device_unregister(dcssblk_root_dev);
996 unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
/*
 * NOTE(review): body fragment of the module init function (its
 * signature is elided): create the root device and its add/remove
 * attributes, grab a dynamic block major, then process the
 * "segments=" module parameter.
 */
1004 dcssblk_root_dev = root_device_register("dcssblk");
1005 if (IS_ERR(dcssblk_root_dev))
1006 return PTR_ERR(dcssblk_root_dev);
1007 rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
1010 rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
/* Major 0 asks the block layer to assign a free major dynamically. */
1013 rc = register_blkdev(0, DCSSBLK_NAME);
1017 init_rwsem(&dcssblk_devices_sem);
1019 dcssblk_check_params();
/* Error unwind (labels elided): tear down in reverse order. */
1023 root_device_unregister(dcssblk_root_dev);
1028 module_init(dcssblk_init);
1029 module_exit(dcssblk_exit);
1031 module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
1032 MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
1033 "comma-separated list, names in each set separated "
1034 "by commas are separated by colons, each set contains "
1035 "names of contiguous segments and each name max. 8 chars.\n"
1036 "Adding \"(local)\" to the end of each set equals echoing 0 "
1037 "to /sys/devices/dcssblk/<device name>/shared after loading "
1038 "the contiguous segments - \n"
1039 "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\"");
1041 MODULE_LICENSE("GPL");