// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

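/*
 * IBLOCK_BIO_POOL_SIZE sizes the per-device bioset that backs all bio
 * allocations below, so submission can make forward progress under memory
 * pressure; IBLOCK_MAX_BIO_PER_TASK caps how many bios iblock_execute_rw()
 * queues before flushing a plugged batch to the block layer.
 */
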
static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}

static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

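/*
 * Claim the backing block device named by udev_path=, then derive the
 * exported attributes (block size, max sectors, discard and write-zeroes
 * limits, T10 PI profile) from its request queue and integrity profile.
 */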
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	unsigned int max_write_zeroes_sectors;
	int ret;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
	if (max_write_zeroes_sectors)
		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
	else
		dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = &ib_dev->ibd_bio_set;

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->profile->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 &bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_exit(&ib_dev->ibd_bio_set);
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	bioset_exit(&ib_dev->ibd_bio_set);
}

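/*
 * READ CAPACITY reports the highest addressable LBA in the *exported*
 * block size, which may differ from the backing device's logical block
 * size.  For example, a 4096-byte-sector device exported with a 512-byte
 * block size reports eight times as many blocks, i.e. the count is
 * shifted left by ilog2(4096 / 512) = 3.
 */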
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	/*
	 * Both sizes are powers of two (512, 1024, 2048 or 4096), so
	 * rescaling between them is an exact shift.
	 */
	if (block_size > dev->dev_attrib.block_size)
		blocks_long <<= ilog2(block_size / dev->dev_attrib.block_size);
	else
		blocks_long >>= ilog2(dev->dev_attrib.block_size / block_size);

	return blocks_long;
}

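/*
 * Completion accounting: the submitter holds one reference on ibr->pending
 * and each in-flight bio holds another.  Whoever drops the last reference,
 * whether the final bio completion or the submission path itself, reports
 * status to the core exactly once and frees the request.
 */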
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!refcount_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (bio->bi_status) {
		pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
	       int op_flags)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
			       &ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio_set_dev(bio, ib_dev->ibd_bd);
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;
	bio_set_op_attrs(bio, op, op_flags);

	return bio;
}

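/*
 * Submit a batch of bios under a single blk plug so the block layer can
 * merge and dispatch them together rather than one at a time.
 */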
static void iblock_submit_bios(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);
	blk_finish_plug(&plug);
}

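/*
 * Flush completion: cmd may be NULL here, because iblock_execute_sync_cache()
 * leaves bi_private unset when the IMMED bit already caused GOOD status to
 * be returned before the flush was issued.
 */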
static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_status)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

	if (cmd) {
		if (bio->bi_status)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio_set_dev(bio, ib_dev->ibd_bd);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(bio);
	return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct se_device *dev = cmd->se_dev;
	int ret;

	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *sg = &cmd->t_data_sg[0];
	unsigned char *buf, *not_zero;
	int ret;

	buf = kmap(sg_page(sg)) + sg->offset;
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	/*
	 * Fall back to block_execute_write_same() slow-path if
	 * incoming WRITE_SAME payload does not contain zeros.
	 */
	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
	kunmap(sg_page(sg));

	if (not_zero)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = blkdev_issue_zeroout(bdev,
				target_to_linux_sector(dev, cmd->t_task_lba),
				target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd)),
				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
	if (ret)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

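/*
 * WRITE SAME strategy: if the device offloads write-zeroes and the payload
 * is all zeros, punt to blkdev_issue_zeroout() above; otherwise replicate
 * the single-block payload across the LBA range with a chain of bios.
 */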
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	if (bdev_write_zeroes_sectors(bdev)) {
		if (!iblock_execute_zero_out(bdev, cmd))
			return 0;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
					     0);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sectors -= sg->length >> SECTOR_SHIFT;
	}

	iblock_submit_bios(&list);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

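/*
 * Parse the comma/newline-separated configfs control string using the token
 * table above; e.g. a hypothetical "udev_path=/dev/sdc,readonly=1" would
 * claim /dev/sdc and export it read-only.
 */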
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;
	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
					SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
			"CLAIMED: IBLOCK");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}
	return bl;
}

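/*
 * Attach a protection information payload to @bio.  The seed is the virtual
 * start sector converted from 512-byte units into integrity interval units,
 * and the PI buffers are fed from the command's protection scatterlist.
 */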
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
		 struct sg_mapping_iter *miter)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
				  (bi->interval_exp - SECTOR_SHIFT));

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	resid = bip->bip_iter.bi_size;
	while (resid > 0 && sg_miter_next(miter)) {
		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (rc != len) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
			 miter->page, len, offset_in_page(miter->addr));

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;

		sg_miter_stop(miter);
	}

	return 0;
}

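/*
 * Main READ/WRITE path.  For writes on queues that advertise FUA support,
 * REQ_FUA is set when the initiator requested Force Unit Access or when no
 * volatile write cache is present, forcing writethrough semantics.
 */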
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	unsigned bio_cnt;
	int i, rc, op, op_flags = 0;
	struct sg_mapping_iter prot_miter;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using REQ_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		op = REQ_OP_WRITE;
		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
			if (cmd->se_cmd_flags & SCF_FUA)
				op_flags = REQ_FUA;
			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
				op_flags = REQ_FUA;
		}
	} else {
		op = REQ_OP_READ;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		refcount_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 2);
	bio_cnt = 1;

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
			       op == REQ_OP_READ ? SG_MITER_FROM_SG :
						   SG_MITER_TO_SG);

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
				if (rc)
					goto fail_put_bios;
			}

			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num, op,
					     op_flags);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

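/*
 * LBPPBE: logical blocks per physical block exponent, reported in READ
 * CAPACITY(16).  For example, 512-byte logical sectors on 4096-byte
 * physical sectors yield ilog2(8) = 3.
 */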
static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw = iblock_execute_rw,
	.execute_sync_cache = iblock_execute_sync_cache,
	.execute_write_same = iblock_execute_write_same,
	.execute_unmap = iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}

static const struct target_backend_ops iblock_ops = {
	.name = "iblock",
	.inquiry_prod = "IBLOCK",
	.inquiry_rev = IBLOCK_VERSION,
	.owner = THIS_MODULE,
	.attach_hba = iblock_attach_hba,
	.detach_hba = iblock_detach_hba,
	.alloc_device = iblock_alloc_device,
	.configure_device = iblock_configure_device,
	.destroy_device = iblock_destroy_device,
	.free_device = iblock_free_device,
	.parse_cdb = iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type = sbc_get_device_type,
	.get_blocks = iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe = iblock_get_lbppbe,
	.get_io_min = iblock_get_io_min,
	.get_io_opt = iblock_get_io_opt,
	.get_write_cache = iblock_get_write_cache,
	.tb_dev_attrib_attrs = sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);