// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK  32     /* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE    128     /* bios reserved in each device's bioset */

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
        return container_of(dev, struct iblock_dev, dev);
}

static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                IBLOCK_VERSION, TARGET_CORE_VERSION);
        return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
        struct iblock_dev *ib_dev = NULL;

        ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
        if (!ib_dev) {
                pr_err("Unable to allocate struct iblock_dev\n");
                return NULL;
        }

        ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
                                   GFP_KERNEL);
        if (!ib_dev->ibd_plug)
                goto free_dev;

        pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

        return &ib_dev->dev;

free_dev:
        kfree(ib_dev);
        return NULL;
}

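/*
 * Descriptive note: iblock_configure_device() claims the block device named
 * by the udev_path= parameter via blkdev_get_by_path(), copies the queue
 * limits (logical block size, max hw sectors, queue depth) into the se_device
 * attributes, advertises discard and write-same/write-zeroes limits when the
 * underlying device supports them, and enables T10 PI pass-through if the
 * device exposes a supported blk_integrity profile.
 */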
static int iblock_configure_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct request_queue *q;
        struct block_device *bd = NULL;
        struct blk_integrity *bi;
        fmode_t mode;
        unsigned int max_write_zeroes_sectors;
        int ret;

        if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                pr_err("Missing udev_path= parameter for IBLOCK\n");
                return -EINVAL;
        }

        ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (ret) {
                pr_err("IBLOCK: Unable to create bioset\n");
                goto out;
        }

        pr_debug("IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);

        mode = FMODE_READ|FMODE_EXCL;
        if (!ib_dev->ibd_readonly)
                mode |= FMODE_WRITE;
        else
                dev->dev_flags |= DF_READ_ONLY;

        bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
                goto out_free_bioset;
        }
        ib_dev->ibd_bd = bd;

        q = bdev_get_queue(bd);

        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
        dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;

        if (target_configure_unmap_from_queue(&dev->dev_attrib, bd))
                pr_debug("IBLOCK: BLOCK Discard support available,"
                         " disabled by default\n");

        /*
         * Enable write same emulation for IBLOCK and default to 0xFFFF,
         * since the smaller WRITE_SAME(10) only carries a two-byte block
         * count.
         */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
        if (max_write_zeroes_sectors)
                dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
        else
                dev->dev_attrib.max_write_same_len = 0xFFFF;

        if (bdev_nonrot(bd))
                dev->dev_attrib.is_nonrot = 1;

        bi = bdev_get_integrity(bd);
        if (bi) {
                struct bio_set *bs = &ib_dev->ibd_bio_set;

                if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
                    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
                        pr_err("IBLOCK export of blk_integrity: %s not"
                               " supported\n", bi->profile->name);
                        ret = -ENOSYS;
                        goto out_blkdev_put;
                }

                if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
                } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
                }

                if (dev->dev_attrib.pi_prot_type) {
                        if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
                                pr_err("Unable to allocate bioset for PI\n");
                                ret = -ENOMEM;
                                goto out_blkdev_put;
                        }
                        pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
                                 &bs->bio_integrity_pool);
                }
                dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
        }

        return 0;

out_blkdev_put:
        blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
        bioset_exit(&ib_dev->ibd_bio_set);
out:
        return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
        struct se_device *dev = container_of(p, struct se_device, rcu_head);
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        kfree(ib_dev->ibd_plug);
        kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
        call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
        bioset_exit(&ib_dev->ibd_bio_set);
}

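/*
 * Descriptive note: ibd_plug holds one iblock_dev_plug per possible CPU
 * (nr_cpu_ids entries, allocated in iblock_alloc_device()).  The
 * IBD_PLUGF_PLUGGED bit guards against handing out the same per-CPU plug
 * twice while it is still active; iblock_unplug_device() releases it.
 */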
static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
        struct iblock_dev_plug *ib_dev_plug;

        /*
         * Each se_device has a per-CPU work context this can be run from,
         * so we shouldn't have multiple threads on the same CPU calling
         * this at the same time.
         */
        ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
        if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
                return NULL;

        blk_start_plug(&ib_dev_plug->blk_plug);
        return &ib_dev_plug->se_plug;
}

static void iblock_unplug_device(struct se_dev_plug *se_plug)
{
        struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
                                        struct iblock_dev_plug, se_plug);

        blk_finish_plug(&ib_dev_plug->blk_plug);
        clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
}

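/*
 * Worked example (illustrative): a device with 4096-byte logical blocks
 * exported with a 512-byte target block size reports eight times as many
 * blocks, hence the "blocks_long <<= 3" below; exporting a 512-byte device
 * with a 4096-byte target block size shifts right instead.
 */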
static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
        struct request_queue *q)
{
        u32 block_size = bdev_logical_block_size(bd);
        unsigned long long blocks_long =
                div_u64(bdev_nr_bytes(bd), block_size) - 1;

        if (block_size == dev->dev_attrib.block_size)
                return blocks_long;

        switch (block_size) {
        case 4096:
                switch (dev->dev_attrib.block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
                case 1024:
                        blocks_long <<= 2;
                        break;
                case 512:
                        blocks_long <<= 3;
                        break;
                default:
                        break;
                }
                break;
        case 2048:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
                case 1024:
                        blocks_long <<= 1;
                        break;
                case 512:
                        blocks_long <<= 2;
                        break;
                default:
                        break;
                }
                break;
        case 1024:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
                case 2048:
                        blocks_long >>= 1;
                        break;
                case 512:
                        blocks_long <<= 1;
                        break;
                default:
                        break;
                }
                break;
        case 512:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
                case 2048:
                        blocks_long >>= 2;
                        break;
                case 1024:
                        blocks_long >>= 1;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        return blocks_long;
}

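/*
 * Descriptive note: ibr->pending counts outstanding references to the
 * command - one per submitted bio, plus one held by the submitting context
 * in iblock_execute_rw().  Each iblock_bio_done() drops one reference; the
 * final drop here reports GOOD or CHECK CONDITION to the target core and
 * frees the iblock_req.
 */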
static void iblock_complete_cmd(struct se_cmd *cmd)
{
        struct iblock_req *ibr = cmd->priv;
        u8 status;

        if (!refcount_dec_and_test(&ibr->pending))
                return;

        if (atomic_read(&ibr->ib_bio_err_cnt))
                status = SAM_STAT_CHECK_CONDITION;
        else
                status = SAM_STAT_GOOD;

        target_complete_cmd(cmd, status);
        kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
        struct se_cmd *cmd = bio->bi_private;
        struct iblock_req *ibr = cmd->priv;

        if (bio->bi_status) {
                pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
                smp_mb__after_atomic();
        }

        bio_put(bio);

        iblock_complete_cmd(cmd);
}

static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
                                  unsigned int opf)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        struct bio *bio;

        /*
         * Only allocate as many vector entries as the bio code allows us to,
         * we'll loop later on until we have handled the whole request.
         */
        bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
                               GFP_NOIO, &ib_dev->ibd_bio_set);
        if (!bio) {
                pr_err("Unable to allocate memory for bio\n");
                return NULL;
        }

        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_iter.bi_sector = lba;

        return bio;
}

static void iblock_submit_bios(struct bio_list *list)
{
        struct blk_plug plug;
        struct bio *bio;
        /*
         * The block layer handles nested plugs, so just plug/unplug to handle
         * fabric drivers that didn't support batching and multi bio cmds.
         */
        blk_start_plug(&plug);
        while ((bio = bio_list_pop(list)))
                submit_bio(bio);
        blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
        struct se_cmd *cmd = bio->bi_private;

        if (bio->bi_status)
                pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

        if (cmd) {
                if (bio->bi_status)
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                else
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
        }

        bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        struct bio *bio;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op.
         */
        if (immed)
                target_complete_cmd(cmd, SAM_STAT_GOOD);

        bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
                        GFP_KERNEL);
        bio->bi_end_io = iblock_end_io_flush;
        if (!immed)
                bio->bi_private = cmd;
        submit_bio(bio);
        return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        struct se_device *dev = cmd->se_dev;
        int ret;

        ret = blkdev_issue_discard(bdev,
                                   target_to_linux_sector(dev, lba),
                                   target_to_linux_sector(dev, nolb),
                                   GFP_KERNEL);
        if (ret < 0) {
                pr_err("blkdev_issue_discard() failed: %d\n", ret);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }

        return 0;
}

static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct scatterlist *sg = &cmd->t_data_sg[0];
        unsigned char *buf, *not_zero;
        int ret;

        buf = kmap(sg_page(sg)) + sg->offset;
        if (!buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        /*
         * Fall back to block_execute_write_same() slow-path if
         * incoming WRITE_SAME payload does not contain zeros.
         */
        not_zero = memchr_inv(buf, 0x00, cmd->data_length);
        kunmap(sg_page(sg));

        if (not_zero)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        ret = blkdev_issue_zeroout(bdev,
                                target_to_linux_sector(dev, cmd->t_task_lba),
                                target_to_linux_sector(dev,
                                        sbc_get_write_same_sectors(cmd)),
                                GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
        if (ret)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

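/*
 * Descriptive note: when the WRITE SAME payload is all zeroes and the device
 * advertises write-zeroes support, iblock_execute_zero_out() above offloads
 * the whole range with a single blkdev_issue_zeroout(); otherwise the loop
 * below builds one bio per chunk from the single-block payload.
 */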
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
        struct iblock_req *ibr;
        struct scatterlist *sg;
        struct bio *bio;
        struct bio_list list;
        struct se_device *dev = cmd->se_dev;
        sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        sector_t sectors = target_to_linux_sector(dev,
                                        sbc_get_write_same_sectors(cmd));

        if (cmd->prot_op) {
                pr_err("WRITE_SAME: Protection information with IBLOCK"
                       " backends not supported\n");
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        sg = &cmd->t_data_sg[0];

        if (cmd->t_data_nents > 1 ||
            sg->length != cmd->se_dev->dev_attrib.block_size) {
                pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
                        " block_size: %u\n", cmd->t_data_nents, sg->length,
                        cmd->se_dev->dev_attrib.block_size);
                return TCM_INVALID_CDB_FIELD;
        }

        if (bdev_write_zeroes_sectors(bdev)) {
                if (!iblock_execute_zero_out(bdev, cmd))
                        return 0;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        refcount_set(&ibr->pending, 1);

        while (sectors) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {

                        bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
                        if (!bio)
                                goto fail_put_bios;

                        refcount_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> SECTOR_SHIFT;
                sectors -= sg->length >> SECTOR_SHIFT;
        }

        iblock_submit_bios(&list);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
        Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
        {Opt_udev_path, "udev_path=%s"},
        {Opt_readonly, "readonly=%d"},
        {Opt_force, "force=%d"},
        {Opt_err, NULL}
};

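/*
 * Example (illustrative only): a configfs write such as
 *
 *      udev_path=/dev/disk/by-id/example,readonly=1
 *
 * is split on ',' and '\n' below.  udev_path= must be set before the device
 * is configured, and readonly= controls whether the block device is later
 * opened with FMODE_WRITE.
 */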
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
        unsigned long tmp_readonly;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_udev_path:
                        if (ib_dev->ibd_bd) {
                                pr_err("Unable to set udev_path= while"
                                        " ib_dev->ibd_bd exists\n");
                                ret = -EEXIST;
                                goto out;
                        }
                        if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
                                SE_UDEV_PATH_LEN) == 0) {
                                ret = -EINVAL;
                                break;
                        }
                        pr_debug("IBLOCK: Referencing UDEV path: %s\n",
                                        ib_dev->ibd_udev_path);
                        ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                        break;
                case Opt_readonly:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = kstrtoul(arg_p, 0, &tmp_readonly);
                        kfree(arg_p);
                        if (ret < 0) {
                                pr_err("kstrtoul() failed for"
                                                " readonly=\n");
                                goto out;
                        }
                        ib_dev->ibd_readonly = tmp_readonly;
                        pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
                        break;
                case Opt_force:
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        ssize_t bl = 0;

        if (bd)
                bl += sprintf(b + bl, "iBlock device: %pg", bd);
        if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
                bl += sprintf(b + bl, "  UDEV PATH: %s",
                                ib_dev->ibd_udev_path);
        bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

        bl += sprintf(b + bl, "        ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
                        "CLAIMED: IBLOCK");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
        }

        return bl;
}

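/*
 * Descriptive note: iblock_alloc_bip() attaches a bio_integrity_payload to
 * the bio and adds the pages of the command's protection scatterlist
 * (cmd->t_prot_sg, walked via the sg_mapping_iter) to it, so the block layer
 * carries the T10 PI data alongside the data bio.
 */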
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
                 struct sg_mapping_iter *miter)
{
        struct se_device *dev = cmd->se_dev;
        struct blk_integrity *bi;
        struct bio_integrity_payload *bip;
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        int rc;
        size_t resid, len;

        bi = bdev_get_integrity(ib_dev->ibd_bd);
        if (!bi) {
                pr_err("Unable to locate bio_integrity\n");
                return -ENODEV;
        }

        bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
        if (IS_ERR(bip)) {
                pr_err("Unable to allocate bio_integrity_payload\n");
                return PTR_ERR(bip);
        }

        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
        /* virtual start sector must be in integrity interval units */
        bip_set_seed(bip, bio->bi_iter.bi_sector >>
                                  (bi->interval_exp - SECTOR_SHIFT));

        pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
                 (unsigned long long)bip->bip_iter.bi_sector);

        resid = bip->bip_iter.bi_size;
        while (resid > 0 && sg_miter_next(miter)) {

                len = min_t(size_t, miter->length, resid);
                rc = bio_integrity_add_page(bio, miter->page, len,
                                            offset_in_page(miter->addr));
                if (rc != len) {
                        pr_err("bio_integrity_add_page() failed; %d\n", rc);
                        sg_miter_stop(miter);
                        return -ENOMEM;
                }

                pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
                          miter->page, len, offset_in_page(miter->addr));

                resid -= len;
                if (len < miter->length)
                        miter->consumed -= miter->length - len;
        }
        sg_miter_stop(miter);

        return 0;
}

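/*
 * Descriptive note: bios for one command are collected on a local bio_list
 * and pushed to the block layer in batches of at most IBLOCK_MAX_BIO_PER_TASK
 * under a single plug (see iblock_submit_bios()), so large commands are
 * submitted in bounded chunks rather than queued as one huge list.
 */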
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                  enum dma_data_direction data_direction)
{
        struct se_device *dev = cmd->se_dev;
        sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        struct iblock_req *ibr;
        struct bio *bio;
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
        unsigned int opf;
        unsigned bio_cnt;
        int i, rc;
        struct sg_mapping_iter prot_miter;
        unsigned int miter_dir;

        if (data_direction == DMA_TO_DEVICE) {
                struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
                /*
                 * Force writethrough using REQ_FUA if a volatile write cache
                 * is not enabled, or if the initiator set the Force Unit
                 * Access bit.
                 */
                opf = REQ_OP_WRITE;
                miter_dir = SG_MITER_TO_SG;
                if (bdev_fua(ib_dev->ibd_bd)) {
                        if (cmd->se_cmd_flags & SCF_FUA)
                                opf |= REQ_FUA;
                        else if (!bdev_write_cache(ib_dev->ibd_bd))
                                opf |= REQ_FUA;
                }
        } else {
                opf = REQ_OP_READ;
                miter_dir = SG_MITER_FROM_SG;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        if (!sgl_nents) {
                refcount_set(&ibr->pending, 1);
                iblock_complete_cmd(cmd);
                return 0;
        }

        bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        refcount_set(&ibr->pending, 2);
        bio_cnt = 1;

        if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
                sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
                               miter_dir);

        for_each_sg(sgl, sg, sgl_nents, i) {
                /*
                 * XXX: if the length the device accepts is shorter than the
                 *      length of the S/G list entry this will cause an
                 *      endless loop.  Better hope no driver uses huge pages.
                 */
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
                                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                                if (rc)
                                        goto fail_put_bios;
                        }

                        if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
                                iblock_submit_bios(&list);
                                bio_cnt = 0;
                        }

                        bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
                        if (!bio)
                                goto fail_put_bios;

                        refcount_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                        bio_cnt++;
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> SECTOR_SHIFT;
                sg_num--;
        }

        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                if (rc)
                        goto fail_put_bios;
        }

        iblock_submit_bios(&list);
        iblock_complete_cmd(cmd);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        int ret;

        ret = bdev_alignment_offset(bd);
        if (ret == -1)
                return 0;

        /* convert offset-bytes to offset-lbas */
        return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        unsigned int logs_per_phys =
                bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

        return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;

        return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
        .execute_rw             = iblock_execute_rw,
        .execute_sync_cache     = iblock_execute_sync_cache,
        .execute_write_same     = iblock_execute_write_same,
        .execute_unmap          = iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
        return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
}

static const struct target_backend_ops iblock_ops = {
        .name                   = "iblock",
        .inquiry_prod           = "IBLOCK",
        .inquiry_rev            = IBLOCK_VERSION,
        .owner                  = THIS_MODULE,
        .attach_hba             = iblock_attach_hba,
        .detach_hba             = iblock_detach_hba,
        .alloc_device           = iblock_alloc_device,
        .configure_device       = iblock_configure_device,
        .destroy_device         = iblock_destroy_device,
        .free_device            = iblock_free_device,
        .plug_device            = iblock_plug_device,
        .unplug_device          = iblock_unplug_device,
        .parse_cdb              = iblock_parse_cdb,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = iblock_get_blocks,
        .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
        .get_lbppbe             = iblock_get_lbppbe,
        .get_io_min             = iblock_get_io_min,
        .get_io_opt             = iblock_get_io_opt,
        .get_write_cache        = iblock_get_write_cache,
        .tb_dev_attrib_attrs    = sbc_attrib_attrs,
};

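/*
 * Usage note (assumed typical setup, not part of this file's logic):
 * transport_backend_register() makes the "iblock" backend available to the
 * target core, so userspace tools such as targetcli can create iblock
 * backstores through configfs, normally under /sys/kernel/config/target/core/.
 */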
static int __init iblock_module_init(void)
{
        return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
        target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);