block: Add bio_max_segs
[linux-2.6-microblaze.git] drivers/target/target_core_iblock.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine  <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK  32     /* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE    128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
        return container_of(dev, struct iblock_dev, dev);
}


static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                IBLOCK_VERSION, TARGET_CORE_VERSION);
        return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
        struct iblock_dev *ib_dev = NULL;

        ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
        if (!ib_dev) {
                pr_err("Unable to allocate struct iblock_dev\n");
                return NULL;
        }

        pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

        return &ib_dev->dev;
}

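/*
 * Claim the backing struct block_device named by udev_path, set up the
 * per-device bioset, and derive the exported device attributes (block size,
 * max sectors, discard/write-zeroes limits, DIF protection type) from the
 * underlying request queue.
 */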
static int iblock_configure_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct request_queue *q;
        struct block_device *bd = NULL;
        struct blk_integrity *bi;
        fmode_t mode;
        unsigned int max_write_zeroes_sectors;
        int ret = -ENOMEM;

        if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                pr_err("Missing udev_path= parameters for IBLOCK\n");
                return -EINVAL;
        }

        ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (ret) {
                pr_err("IBLOCK: Unable to create bioset\n");
                goto out;
        }

        pr_debug("IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);

        mode = FMODE_READ|FMODE_EXCL;
        if (!ib_dev->ibd_readonly)
                mode |= FMODE_WRITE;
        else
                dev->dev_flags |= DF_READ_ONLY;

        bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
                goto out_free_bioset;
        }
        ib_dev->ibd_bd = bd;

        q = bdev_get_queue(bd);

        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
        dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;

        if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
                pr_debug("IBLOCK: BLOCK Discard support available,"
                         " disabled by default\n");

        /*
         * Enable write same emulation for IBLOCK and use 0xFFFF as the
         * default, since the smaller WRITE_SAME(10) only has a two-byte
         * block count.
         */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
        if (max_write_zeroes_sectors)
                dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
        else
                dev->dev_attrib.max_write_same_len = 0xFFFF;

        if (blk_queue_nonrot(q))
                dev->dev_attrib.is_nonrot = 1;

        bi = bdev_get_integrity(bd);
        if (bi) {
                struct bio_set *bs = &ib_dev->ibd_bio_set;

                if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
                    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
                        pr_err("IBLOCK export of blk_integrity: %s not"
                               " supported\n", bi->profile->name);
                        ret = -ENOSYS;
                        goto out_blkdev_put;
                }

                if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
                } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
                }

                if (dev->dev_attrib.pi_prot_type) {
                        if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
                                pr_err("Unable to allocate bioset for PI\n");
                                ret = -ENOMEM;
                                goto out_blkdev_put;
                        }
                        pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
                                 &bs->bio_integrity_pool);
                }
                dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
        }

        return 0;

out_blkdev_put:
        blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
        bioset_exit(&ib_dev->ibd_bio_set);
out:
        return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
        call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
        bioset_exit(&ib_dev->ibd_bio_set);
}

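/*
 * Report the number of addressable blocks, rescaled from the backing
 * device's logical block size to the block size exported by the target.
 */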
static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
        struct request_queue *q)
{
        unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
                                        bdev_logical_block_size(bd)) - 1);
        u32 block_size = bdev_logical_block_size(bd);

        if (block_size == dev->dev_attrib.block_size)
                return blocks_long;

        switch (block_size) {
        case 4096:
                switch (dev->dev_attrib.block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
                case 1024:
                        blocks_long <<= 2;
                        break;
                case 512:
                        blocks_long <<= 3;
                        break;
                default:
                        break;
                }
                break;
        case 2048:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
                case 1024:
                        blocks_long <<= 1;
                        break;
                case 512:
                        blocks_long <<= 2;
                        break;
                default:
                        break;
                }
                break;
        case 1024:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
                case 2048:
                        blocks_long >>= 1;
                        break;
                case 512:
                        blocks_long <<= 1;
                        break;
                default:
                        break;
                }
                break;
        case 512:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
                case 2048:
                        blocks_long >>= 2;
                        break;
                case 1024:
                        blocks_long >>= 1;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        return blocks_long;
}

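/*
 * Called once per completed bio; the command is finished only when the last
 * outstanding bio drops ibr->pending to zero.
 */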
static void iblock_complete_cmd(struct se_cmd *cmd)
{
        struct iblock_req *ibr = cmd->priv;
        u8 status;

        if (!refcount_dec_and_test(&ibr->pending))
                return;

        if (atomic_read(&ibr->ib_bio_err_cnt))
                status = SAM_STAT_CHECK_CONDITION;
        else
                status = SAM_STAT_GOOD;

        target_complete_cmd(cmd, status);
        kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
        struct se_cmd *cmd = bio->bi_private;
        struct iblock_req *ibr = cmd->priv;

        if (bio->bi_status) {
                pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
                smp_mb__after_atomic();
        }

        bio_put(bio);

        iblock_complete_cmd(cmd);
}

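/*
 * Allocate a bio from the per-device bioset; bio_max_segs() caps sg_num at
 * the block layer's per-bio segment limit, so callers loop and allocate
 * further bios for whatever does not fit.
 */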
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
               int op_flags)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        struct bio *bio;

        /*
         * Only allocate as many vector entries as the bio code allows us to,
         * we'll loop later on until we have handled the whole request.
         */
        bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
                                &ib_dev->ibd_bio_set);
        if (!bio) {
                pr_err("Unable to allocate memory for bio\n");
                return NULL;
        }

        bio_set_dev(bio, ib_dev->ibd_bd);
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_iter.bi_sector = lba;
        bio_set_op_attrs(bio, op, op_flags);

        return bio;
}

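/*
 * Submit every bio on the list under a single blk_plug so the block layer
 * can batch and merge adjacent requests.
 */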
static void iblock_submit_bios(struct bio_list *list)
{
        struct blk_plug plug;
        struct bio *bio;

        blk_start_plug(&plug);
        while ((bio = bio_list_pop(list)))
                submit_bio(bio);
        blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
        struct se_cmd *cmd = bio->bi_private;

        if (bio->bi_status)
                pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

        if (cmd) {
                if (bio->bi_status)
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                else
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
        }

        bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        struct bio *bio;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op.
         */
        if (immed)
                target_complete_cmd(cmd, SAM_STAT_GOOD);

        bio = bio_alloc(GFP_KERNEL, 0);
        bio->bi_end_io = iblock_end_io_flush;
        bio_set_dev(bio, ib_dev->ibd_bd);
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        if (!immed)
                bio->bi_private = cmd;
        submit_bio(bio);
        return 0;
}

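/*
 * Discard the requested LBA range on the backing block device via
 * blkdev_issue_discard().
 */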
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        struct se_device *dev = cmd->se_dev;
        int ret;

        ret = blkdev_issue_discard(bdev,
                                   target_to_linux_sector(dev, lba),
                                   target_to_linux_sector(dev, nolb),
                                   GFP_KERNEL, 0);
        if (ret < 0) {
                pr_err("blkdev_issue_discard() failed: %d\n", ret);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }

        return 0;
}

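/*
 * Fast path for WRITE SAME with an all-zeroes payload: hand the range to
 * blkdev_issue_zeroout() instead of building bios from the payload.
 */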
static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct scatterlist *sg = &cmd->t_data_sg[0];
        unsigned char *buf, *not_zero;
        int ret;

        buf = kmap(sg_page(sg)) + sg->offset;
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        /*
         * Fall back to block_execute_write_same() slow-path if
         * incoming WRITE_SAME payload does not contain zeros.
         */
        not_zero = memchr_inv(buf, 0x00, cmd->data_length);
        kunmap(sg_page(sg));

        if (not_zero)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        ret = blkdev_issue_zeroout(bdev,
                                target_to_linux_sector(dev, cmd->t_task_lba),
                                target_to_linux_sector(dev,
                                        sbc_get_write_same_sectors(cmd)),
                                GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
        if (ret)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        target_complete_cmd(cmd, GOOD);
        return 0;
}

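/*
 * Emulate WRITE SAME by replicating the single-block payload across the
 * requested LBA range, one bio_add_page() per block.
 */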
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        struct iblock_req *ibr;
        struct scatterlist *sg;
        struct bio *bio;
        struct bio_list list;
        struct se_device *dev = cmd->se_dev;
        sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        sector_t sectors = target_to_linux_sector(dev,
                                        sbc_get_write_same_sectors(cmd));

        if (cmd->prot_op) {
                pr_err("WRITE_SAME: Protection information with IBLOCK"
                       " backends not supported\n");
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        sg = &cmd->t_data_sg[0];

        if (cmd->t_data_nents > 1 ||
            sg->length != cmd->se_dev->dev_attrib.block_size) {
                pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
                        " block_size: %u\n", cmd->t_data_nents, sg->length,
                        cmd->se_dev->dev_attrib.block_size);
                return TCM_INVALID_CDB_FIELD;
        }

        if (bdev_write_zeroes_sectors(bdev)) {
                if (!iblock_execute_zero_out(bdev, cmd))
                        return 0;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        refcount_set(&ibr->pending, 1);

        while (sectors) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {

                        bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
                                             0);
                        if (!bio)
                                goto fail_put_bios;

                        refcount_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> SECTOR_SHIFT;
                sectors -= sg->length >> SECTOR_SHIFT;
        }

        iblock_submit_bios(&list);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
        Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
        {Opt_udev_path, "udev_path=%s"},
        {Opt_readonly, "readonly=%d"},
        {Opt_force, "force=%d"},
        {Opt_err, NULL}
};

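/*
 * Parse the configfs device control string: udev_path=, readonly= and
 * force= (the latter is accepted but currently ignored).
 */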
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
        unsigned long tmp_readonly;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_udev_path:
                        if (ib_dev->ibd_bd) {
                                pr_err("Unable to set udev_path= while"
                                        " ib_dev->ibd_bd exists\n");
                                ret = -EEXIST;
                                goto out;
                        }
                        if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
                                SE_UDEV_PATH_LEN) == 0) {
                                ret = -EINVAL;
                                break;
                        }
                        pr_debug("IBLOCK: Referencing UDEV path: %s\n",
                                        ib_dev->ibd_udev_path);
                        ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                        break;
                case Opt_readonly:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = kstrtoul(arg_p, 0, &tmp_readonly);
                        kfree(arg_p);
                        if (ret < 0) {
                                pr_err("kstrtoul() failed for"
                                                " readonly=\n");
                                goto out;
                        }
                        ib_dev->ibd_readonly = tmp_readonly;
                        pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
                        break;
                case Opt_force:
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        char buf[BDEVNAME_SIZE];
        ssize_t bl = 0;

        if (bd)
                bl += sprintf(b + bl, "iBlock device: %s",
                                bdevname(bd, buf));
        if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
                bl += sprintf(b + bl, "  UDEV PATH: %s",
                                ib_dev->ibd_udev_path);
        bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

        bl += sprintf(b + bl, "        ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
                        "CLAIMED: IBLOCK");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
        }

        return bl;
}

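/*
 * Attach a bio_integrity_payload carrying the command's protection
 * information (T10 PI) scatterlist to the data bio.
 */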
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
                 struct sg_mapping_iter *miter)
{
        struct se_device *dev = cmd->se_dev;
        struct blk_integrity *bi;
        struct bio_integrity_payload *bip;
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        int rc;
        size_t resid, len;

        bi = bdev_get_integrity(ib_dev->ibd_bd);
        if (!bi) {
                pr_err("Unable to locate bio_integrity\n");
                return -ENODEV;
        }

        bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
        if (IS_ERR(bip)) {
                pr_err("Unable to allocate bio_integrity_payload\n");
                return PTR_ERR(bip);
        }

        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
        /* virtual start sector must be in integrity interval units */
        bip_set_seed(bip, bio->bi_iter.bi_sector >>
                                  (bi->interval_exp - SECTOR_SHIFT));

        pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
                 (unsigned long long)bip->bip_iter.bi_sector);

        resid = bip->bip_iter.bi_size;
        while (resid > 0 && sg_miter_next(miter)) {

                len = min_t(size_t, miter->length, resid);
                rc = bio_integrity_add_page(bio, miter->page, len,
                                            offset_in_page(miter->addr));
                if (rc != len) {
                        pr_err("bio_integrity_add_page() failed; %d\n", rc);
                        sg_miter_stop(miter);
                        return -ENOMEM;
                }

                pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
                          miter->page, len, offset_in_page(miter->addr));

                resid -= len;
                if (len < miter->length)
                        miter->consumed -= miter->length - len;
        }
        sg_miter_stop(miter);

        return 0;
}

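/*
 * Main READ/WRITE path: map the command's scatterlist into one or more bios,
 * attaching protection information when DIF is enabled, and submit them in
 * batches of up to IBLOCK_MAX_BIO_PER_TASK.
 */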
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                  enum dma_data_direction data_direction)
{
        struct se_device *dev = cmd->se_dev;
        sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        struct iblock_req *ibr;
        struct bio *bio;
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
        unsigned bio_cnt;
        int i, rc, op, op_flags = 0;
        struct sg_mapping_iter prot_miter;

        if (data_direction == DMA_TO_DEVICE) {
                struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
                struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
                /*
                 * Force writethrough using REQ_FUA if a volatile write cache
                 * is not enabled, or if initiator set the Force Unit Access bit.
                 */
                op = REQ_OP_WRITE;
                if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
                        if (cmd->se_cmd_flags & SCF_FUA)
                                op_flags = REQ_FUA;
                        else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                                op_flags = REQ_FUA;
                }
        } else {
                op = REQ_OP_READ;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        if (!sgl_nents) {
                refcount_set(&ibr->pending, 1);
                iblock_complete_cmd(cmd);
                return 0;
        }

        bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        refcount_set(&ibr->pending, 2);
        bio_cnt = 1;

        if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
                sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
                               op == REQ_OP_READ ? SG_MITER_FROM_SG :
                                                   SG_MITER_TO_SG);

        for_each_sg(sgl, sg, sgl_nents, i) {
                /*
                 * XXX: if the length the device accepts is shorter than the
                 *      length of the S/G list entry this will cause an
                 *      endless loop.  Better hope no driver uses huge pages.
                 */
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
                                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                                if (rc)
                                        goto fail_put_bios;
                        }

                        if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
                                iblock_submit_bios(&list);
                                bio_cnt = 0;
                        }

                        bio = iblock_get_bio(cmd, block_lba, sg_num, op,
                                             op_flags);
                        if (!bio)
                                goto fail_put_bios;

                        refcount_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                        bio_cnt++;
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> SECTOR_SHIFT;
                sg_num--;
        }

        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                if (rc)
                        goto fail_put_bios;
        }

        iblock_submit_bios(&list);
        iblock_complete_cmd(cmd);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        int ret;

        ret = bdev_alignment_offset(bd);
        if (ret == -1)
                return 0;

        /* convert offset-bytes to offset-lbas */
        return ret / bdev_logical_block_size(bd);
}

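/*
 * Logical blocks per physical block exponent, derived from the ratio of the
 * backing device's physical to logical block size.
 */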
static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

        return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
        .execute_rw             = iblock_execute_rw,
        .execute_sync_cache     = iblock_execute_sync_cache,
        .execute_write_same     = iblock_execute_write_same,
        .execute_unmap          = iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}

static const struct target_backend_ops iblock_ops = {
        .name                   = "iblock",
        .inquiry_prod           = "IBLOCK",
        .inquiry_rev            = IBLOCK_VERSION,
        .owner                  = THIS_MODULE,
        .attach_hba             = iblock_attach_hba,
        .detach_hba             = iblock_detach_hba,
        .alloc_device           = iblock_alloc_device,
        .configure_device       = iblock_configure_device,
        .destroy_device         = iblock_destroy_device,
        .free_device            = iblock_free_device,
        .parse_cdb              = iblock_parse_cdb,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = iblock_get_blocks,
        .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
        .get_lbppbe             = iblock_get_lbppbe,
        .get_io_min             = iblock_get_io_min,
        .get_io_opt             = iblock_get_io_opt,
        .get_write_cache        = iblock_get_write_cache,
        .tb_dev_attrib_attrs    = sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
        return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
        target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);