GNU Linux-libre 5.19.9-gnu
[releases.git] / drivers / target / target_core_iblock.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*******************************************************************************
3  * Filename:  target_core_iblock.c
4  *
5  * This file contains the Storage Engine <-> Linux BlockIO transport
6  * specific functions.
7  *
8  * (c) Copyright 2003-2013 Datera, Inc.
9  *
10  * Nicholas A. Bellinger <nab@kernel.org>
11  *
12  ******************************************************************************/
13
14 #include <linux/string.h>
15 #include <linux/parser.h>
16 #include <linux/timer.h>
17 #include <linux/fs.h>
18 #include <linux/blkdev.h>
19 #include <linux/blk-integrity.h>
20 #include <linux/slab.h>
21 #include <linux/spinlock.h>
22 #include <linux/bio.h>
23 #include <linux/file.h>
24 #include <linux/module.h>
25 #include <linux/scatterlist.h>
26 #include <scsi/scsi_proto.h>
27 #include <asm/unaligned.h>
28
29 #include <target/target_core_base.h>
30 #include <target/target_core_backend.h>
31
32 #include "target_core_iblock.h"
33
34 #define IBLOCK_MAX_BIO_PER_TASK  32     /* max # of bios to submit at a time */
35 #define IBLOCK_BIO_POOL_SIZE    128
36
37 static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
38 {
39         return container_of(dev, struct iblock_dev, dev);
40 }
41
42
43 static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
44 {
45         pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
46                 " Generic Target Core Stack %s\n", hba->hba_id,
47                 IBLOCK_VERSION, TARGET_CORE_VERSION);
48         return 0;
49 }
50
51 static void iblock_detach_hba(struct se_hba *hba)
52 {
53 }
54
55 static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
56 {
57         struct iblock_dev *ib_dev = NULL;
58
59         ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
60         if (!ib_dev) {
61                 pr_err("Unable to allocate struct iblock_dev\n");
62                 return NULL;
63         }
64
65         ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
66                                    GFP_KERNEL);
67         if (!ib_dev->ibd_plug)
68                 goto free_dev;
69
70         pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);
71
72         return &ib_dev->dev;
73
74 free_dev:
75         kfree(ib_dev);
76         return NULL;
77 }
78
79 static int iblock_configure_device(struct se_device *dev)
80 {
81         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
82         struct request_queue *q;
83         struct block_device *bd = NULL;
84         struct blk_integrity *bi;
85         fmode_t mode;
86         unsigned int max_write_zeroes_sectors;
87         int ret;
88
89         if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
90                 pr_err("Missing udev_path= parameter for IBLOCK\n");
91                 return -EINVAL;
92         }
93
94         ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
95         if (ret) {
96                 pr_err("IBLOCK: Unable to create bioset\n");
97                 goto out;
98         }
99
100         pr_debug("IBLOCK: Claiming struct block_device: %s\n",
101                         ib_dev->ibd_udev_path);
102
103         mode = FMODE_READ|FMODE_EXCL;
104         if (!ib_dev->ibd_readonly)
105                 mode |= FMODE_WRITE;
106         else
107                 dev->dev_flags |= DF_READ_ONLY;
108
109         bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
110         if (IS_ERR(bd)) {
111                 ret = PTR_ERR(bd);
112                 goto out_free_bioset;
113         }
114         ib_dev->ibd_bd = bd;
115
116         q = bdev_get_queue(bd);
117
118         dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
119         dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
120         dev->dev_attrib.hw_queue_depth = q->nr_requests;
121
122         if (target_configure_unmap_from_queue(&dev->dev_attrib, bd))
123                 pr_debug("IBLOCK: BLOCK Discard support available,"
124                          " disabled by default\n");
125
126         /*
127          * Enable write same emulation for IBLOCK, defaulting to 0xFFFF
128          * since the smaller WRITE_SAME(10) only has a two-byte block count.
129          */
130         max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
131         if (max_write_zeroes_sectors)
132                 dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
133         else
134                 dev->dev_attrib.max_write_same_len = 0xFFFF;
135
136         if (bdev_nonrot(bd))
137                 dev->dev_attrib.is_nonrot = 1;
138
139         bi = bdev_get_integrity(bd);
140         if (bi) {
141                 struct bio_set *bs = &ib_dev->ibd_bio_set;
142
143                 if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
144                     !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
145                         pr_err("IBLOCK export of blk_integrity: %s not"
146                                " supported\n", bi->profile->name);
147                         ret = -ENOSYS;
148                         goto out_blkdev_put;
149                 }
150
151                 if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
152                         dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
153                 } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
154                         dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
155                 }
156
157                 if (dev->dev_attrib.pi_prot_type) {
158                         if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
159                                 pr_err("Unable to allocate bioset for PI\n");
160                                 ret = -ENOMEM;
161                                 goto out_blkdev_put;
162                         }
163                         pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
164                                  &bs->bio_integrity_pool);
165                 }
166                 dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
167         }
168
169         return 0;
170
171 out_blkdev_put:
172         blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
173 out_free_bioset:
174         bioset_exit(&ib_dev->ibd_bio_set);
175 out:
176         return ret;
177 }
178
179 static void iblock_dev_call_rcu(struct rcu_head *p)
180 {
181         struct se_device *dev = container_of(p, struct se_device, rcu_head);
182         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
183
184         kfree(ib_dev->ibd_plug);
185         kfree(ib_dev);
186 }
187
188 static void iblock_free_device(struct se_device *dev)
189 {
190         call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
191 }
192
193 static void iblock_destroy_device(struct se_device *dev)
194 {
195         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
196
197         if (ib_dev->ibd_bd != NULL)
198                 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
199         bioset_exit(&ib_dev->ibd_bio_set);
200 }
201
202 static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
203 {
204         struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
205         struct iblock_dev_plug *ib_dev_plug;
206
207         /*
208          * Each se_device has a per-cpu work that this can be run from, so
209          * we shouldn't have multiple threads on the same CPU calling this
210          * at the same time.
211          */
212         ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
213         if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
214                 return NULL;
215
216         blk_start_plug(&ib_dev_plug->blk_plug);
217         return &ib_dev_plug->se_plug;
218 }
219
220 static void iblock_unplug_device(struct se_dev_plug *se_plug)
221 {
222         struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
223                                         struct iblock_dev_plug, se_plug);
224
225         blk_finish_plug(&ib_dev_plug->blk_plug);
226         clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
227 }
228
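/*
 * Return the READ CAPACITY style last-LBA value, rescaling the backing
 * device's block count when the exported dev_attrib.block_size differs from
 * the bdev's logical block size (each power-of-two difference is one shift).
 */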
229 static unsigned long long iblock_emulate_read_cap_with_block_size(
230         struct se_device *dev,
231         struct block_device *bd,
232         struct request_queue *q)
233 {
234         u32 block_size = bdev_logical_block_size(bd);
235         unsigned long long blocks_long =
236                 div_u64(bdev_nr_bytes(bd), block_size) - 1;
237
238         if (block_size == dev->dev_attrib.block_size)
239                 return blocks_long;
240
241         switch (block_size) {
242         case 4096:
243                 switch (dev->dev_attrib.block_size) {
244                 case 2048:
245                         blocks_long <<= 1;
246                         break;
247                 case 1024:
248                         blocks_long <<= 2;
249                         break;
250                 case 512:
251                         blocks_long <<= 3;
252                         break;
253                 default:
254                         break;
255                 }
256                 break;
257         case 2048:
258                 switch (dev->dev_attrib.block_size) {
259                 case 4096:
260                         blocks_long >>= 1;
261                         break;
262                 case 1024:
263                         blocks_long <<= 1;
264                         break;
265                 case 512:
266                         blocks_long <<= 2;
267                         break;
268                 default:
269                         break;
270                 }
271                 break;
272         case 1024:
273                 switch (dev->dev_attrib.block_size) {
274                 case 4096:
275                         blocks_long >>= 2;
276                         break;
277                 case 2048:
278                         blocks_long >>= 1;
279                         break;
280                 case 512:
281                         blocks_long <<= 1;
282                         break;
283                 default:
284                         break;
285                 }
286                 break;
287         case 512:
288                 switch (dev->dev_attrib.block_size) {
289                 case 4096:
290                         blocks_long >>= 3;
291                         break;
292                 case 2048:
293                         blocks_long >>= 2;
294                         break;
295                 case 1024:
296                         blocks_long >>= 1;
297                         break;
298                 default:
299                         break;
300                 }
301                 break;
302         default:
303                 break;
304         }
305
306         return blocks_long;
307 }
308
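/*
 * Drop one reference on the iblock_req; once the last outstanding bio (or
 * the submitting context) has dropped its reference, report GOOD or
 * CHECK CONDITION to the core based on ib_bio_err_cnt and free the request.
 */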
309 static void iblock_complete_cmd(struct se_cmd *cmd)
310 {
311         struct iblock_req *ibr = cmd->priv;
312         u8 status;
313
314         if (!refcount_dec_and_test(&ibr->pending))
315                 return;
316
317         if (atomic_read(&ibr->ib_bio_err_cnt))
318                 status = SAM_STAT_CHECK_CONDITION;
319         else
320                 status = SAM_STAT_GOOD;
321
322         target_complete_cmd(cmd, status);
323         kfree(ibr);
324 }
325
326 static void iblock_bio_done(struct bio *bio)
327 {
328         struct se_cmd *cmd = bio->bi_private;
329         struct iblock_req *ibr = cmd->priv;
330
331         if (bio->bi_status) {
332                 pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
333                 /*
334                  * Bump the ib_bio_err_cnt and release bio.
335                  */
336                 atomic_inc(&ibr->ib_bio_err_cnt);
337                 smp_mb__after_atomic();
338         }
339
340         bio_put(bio);
341
342         iblock_complete_cmd(cmd);
343 }
344
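/*
 * Allocate a bio from the per-device bioset, capped at bio_max_segs()
 * vector entries, targeting @lba (in 512-byte sectors) and completing
 * through iblock_bio_done().
 */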
345 static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
346                                   unsigned int opf)
347 {
348         struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
349         struct bio *bio;
350
351         /*
352          * Only allocate as many vector entries as the bio code allows us to;
353          * we'll loop later on until we have handled the whole request.
354          */
355         bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
356                                GFP_NOIO, &ib_dev->ibd_bio_set);
357         if (!bio) {
358                 pr_err("Unable to allocate memory for bio\n");
359                 return NULL;
360         }
361
362         bio->bi_private = cmd;
363         bio->bi_end_io = &iblock_bio_done;
364         bio->bi_iter.bi_sector = lba;
365
366         return bio;
367 }
368
369 static void iblock_submit_bios(struct bio_list *list)
370 {
371         struct blk_plug plug;
372         struct bio *bio;
373         /*
374          * The block layer handles nested plugs, so just plug/unplug to handle
375          * fabric drivers that don't support batching and multi-bio commands.
376          */
377         blk_start_plug(&plug);
378         while ((bio = bio_list_pop(list)))
379                 submit_bio(bio);
380         blk_finish_plug(&plug);
381 }
382
383 static void iblock_end_io_flush(struct bio *bio)
384 {
385         struct se_cmd *cmd = bio->bi_private;
386
387         if (bio->bi_status)
388                 pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
389
390         if (cmd) {
391                 if (bio->bi_status)
392                         target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
393                 else
394                         target_complete_cmd(cmd, SAM_STAT_GOOD);
395         }
396
397         bio_put(bio);
398 }
399
400 /*
401  * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
402  * always flush the whole cache.
403  */
404 static sense_reason_t
405 iblock_execute_sync_cache(struct se_cmd *cmd)
406 {
407         struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
408         int immed = (cmd->t_task_cdb[1] & 0x2);
409         struct bio *bio;
410
411         /*
412          * If the Immediate bit is set, queue up the GOOD response
413          * for this SYNCHRONIZE_CACHE op.
414          */
415         if (immed)
416                 target_complete_cmd(cmd, SAM_STAT_GOOD);
417
418         bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
419                         GFP_KERNEL);
420         bio->bi_end_io = iblock_end_io_flush;
421         if (!immed)
422                 bio->bi_private = cmd;
423         submit_bio(bio);
424         return 0;
425 }
426
427 static sense_reason_t
428 iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
429 {
430         struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
431         struct se_device *dev = cmd->se_dev;
432         int ret;
433
434         ret = blkdev_issue_discard(bdev,
435                                    target_to_linux_sector(dev, lba),
436                                    target_to_linux_sector(dev, nolb),
437                                    GFP_KERNEL);
438         if (ret < 0) {
439                 pr_err("blkdev_issue_discard() failed: %d\n", ret);
440                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
441         }
442
443         return 0;
444 }
445
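/*
 * Offload an all-zero WRITE_SAME payload to blkdev_issue_zeroout(). If the
 * payload contains any non-zero byte, return an error so the caller falls
 * back to the regular bio-based WRITE_SAME path below.
 */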
446 static sense_reason_t
447 iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
448 {
449         struct se_device *dev = cmd->se_dev;
450         struct scatterlist *sg = &cmd->t_data_sg[0];
451         unsigned char *buf, *not_zero;
452         int ret;
453
454         buf = kmap(sg_page(sg)) + sg->offset;
455         if (!buf)
456                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
457         /*
458          * Fall back to the iblock_execute_write_same() slow path if the
459          * incoming WRITE_SAME payload does not contain zeros.
460          */
461         not_zero = memchr_inv(buf, 0x00, cmd->data_length);
462         kunmap(sg_page(sg));
463
464         if (not_zero)
465                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
466
467         ret = blkdev_issue_zeroout(bdev,
468                                 target_to_linux_sector(dev, cmd->t_task_lba),
469                                 target_to_linux_sector(dev,
470                                         sbc_get_write_same_sectors(cmd)),
471                                 GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
472         if (ret)
473                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
474
475         target_complete_cmd(cmd, SAM_STAT_GOOD);
476         return 0;
477 }
478
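/*
 * Handle WRITE_SAME: prefer the write-zeroes offload when the device
 * supports it and the payload is zero-filled, otherwise build a chain of
 * bios that repeat the single-block payload across the requested LBA range.
 */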
479 static sense_reason_t
480 iblock_execute_write_same(struct se_cmd *cmd)
481 {
482         struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
483         struct iblock_req *ibr;
484         struct scatterlist *sg;
485         struct bio *bio;
486         struct bio_list list;
487         struct se_device *dev = cmd->se_dev;
488         sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
489         sector_t sectors = target_to_linux_sector(dev,
490                                         sbc_get_write_same_sectors(cmd));
491
492         if (cmd->prot_op) {
493                 pr_err("WRITE_SAME: Protection information with IBLOCK"
494                        " backends not supported\n");
495                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
496         }
497
498         if (!cmd->t_data_nents)
499                 return TCM_INVALID_CDB_FIELD;
500
501         sg = &cmd->t_data_sg[0];
502
503         if (cmd->t_data_nents > 1 ||
504             sg->length != cmd->se_dev->dev_attrib.block_size) {
505                 pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
506                         " block_size: %u\n", cmd->t_data_nents, sg->length,
507                         cmd->se_dev->dev_attrib.block_size);
508                 return TCM_INVALID_CDB_FIELD;
509         }
510
511         if (bdev_write_zeroes_sectors(bdev)) {
512                 if (!iblock_execute_zero_out(bdev, cmd))
513                         return 0;
514         }
515
516         ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
517         if (!ibr)
518                 goto fail;
519         cmd->priv = ibr;
520
521         bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
522         if (!bio)
523                 goto fail_free_ibr;
524
525         bio_list_init(&list);
526         bio_list_add(&list, bio);
527
528         refcount_set(&ibr->pending, 1);
529
530         while (sectors) {
531                 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
532                                 != sg->length) {
533
534                         bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
535                         if (!bio)
536                                 goto fail_put_bios;
537
538                         refcount_inc(&ibr->pending);
539                         bio_list_add(&list, bio);
540                 }
541
542                 /* Always in 512 byte units for Linux/Block */
543                 block_lba += sg->length >> SECTOR_SHIFT;
544                 sectors -= sg->length >> SECTOR_SHIFT;
545         }
546
547         iblock_submit_bios(&list);
548         return 0;
549
550 fail_put_bios:
551         while ((bio = bio_list_pop(&list)))
552                 bio_put(bio);
553 fail_free_ibr:
554         kfree(ibr);
555 fail:
556         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
557 }
558
559 enum {
560         Opt_udev_path, Opt_readonly, Opt_force, Opt_err
561 };
562
563 static match_table_t tokens = {
564         {Opt_udev_path, "udev_path=%s"},
565         {Opt_readonly, "readonly=%d"},
566         {Opt_force, "force=%d"},
567         {Opt_err, NULL}
568 };
569
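/*
 * Parse the comma/newline separated configfs device parameters, e.g.
 * (device and configfs paths below are only an example):
 *
 *   echo "udev_path=/dev/sdb,readonly=1" > \
 *     /sys/kernel/config/target/core/iblock_0/my_dev/control
 */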
570 static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
571                 const char *page, ssize_t count)
572 {
573         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
574         char *orig, *ptr, *arg_p, *opts;
575         substring_t args[MAX_OPT_ARGS];
576         int ret = 0, token;
577         unsigned long tmp_readonly;
578
579         opts = kstrdup(page, GFP_KERNEL);
580         if (!opts)
581                 return -ENOMEM;
582
583         orig = opts;
584
585         while ((ptr = strsep(&opts, ",\n")) != NULL) {
586                 if (!*ptr)
587                         continue;
588
589                 token = match_token(ptr, tokens, args);
590                 switch (token) {
591                 case Opt_udev_path:
592                         if (ib_dev->ibd_bd) {
593                                 pr_err("Unable to set udev_path= while"
594                                         " ib_dev->ibd_bd exists\n");
595                                 ret = -EEXIST;
596                                 goto out;
597                         }
598                         if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
599                                 SE_UDEV_PATH_LEN) == 0) {
600                                 ret = -EINVAL;
601                                 break;
602                         }
603                         pr_debug("IBLOCK: Referencing UDEV path: %s\n",
604                                         ib_dev->ibd_udev_path);
605                         ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
606                         break;
607                 case Opt_readonly:
608                         arg_p = match_strdup(&args[0]);
609                         if (!arg_p) {
610                                 ret = -ENOMEM;
611                                 break;
612                         }
613                         ret = kstrtoul(arg_p, 0, &tmp_readonly);
614                         kfree(arg_p);
615                         if (ret < 0) {
616                                 pr_err("kstrtoul() failed for"
617                                                 " readonly=\n");
618                                 goto out;
619                         }
620                         ib_dev->ibd_readonly = tmp_readonly;
621                         pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
622                         break;
623                 case Opt_force:
624                         break;
625                 default:
626                         break;
627                 }
628         }
629
630 out:
631         kfree(orig);
632         return (!ret) ? count : ret;
633 }
634
635 static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
636 {
637         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
638         struct block_device *bd = ib_dev->ibd_bd;
639         ssize_t bl = 0;
640
641         if (bd)
642                 bl += sprintf(b + bl, "iBlock device: %pg", bd);
643         if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
644                 bl += sprintf(b + bl, "  UDEV PATH: %s",
645                                 ib_dev->ibd_udev_path);
646         bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);
647
648         bl += sprintf(b + bl, "        ");
649         if (bd) {
650                 bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
651                         MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
652                         "CLAIMED: IBLOCK");
653         } else {
654                 bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
655         }
656
657         return bl;
658 }
659
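/*
 * Attach a bio_integrity_payload to @bio and populate it with protection
 * information from cmd->t_prot_sg, sized for the sectors carried by this
 * bio; @miter keeps its position so PI stays in step across multiple bios.
 */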
660 static int
661 iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
662                  struct sg_mapping_iter *miter)
663 {
664         struct se_device *dev = cmd->se_dev;
665         struct blk_integrity *bi;
666         struct bio_integrity_payload *bip;
667         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
668         int rc;
669         size_t resid, len;
670
671         bi = bdev_get_integrity(ib_dev->ibd_bd);
672         if (!bi) {
673                 pr_err("Unable to locate bio_integrity\n");
674                 return -ENODEV;
675         }
676
677         bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
678         if (IS_ERR(bip)) {
679                 pr_err("Unable to allocate bio_integrity_payload\n");
680                 return PTR_ERR(bip);
681         }
682
683         bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
684         /* virtual start sector must be in integrity interval units */
685         bip_set_seed(bip, bio->bi_iter.bi_sector >>
686                                   (bi->interval_exp - SECTOR_SHIFT));
687
688         pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
689                  (unsigned long long)bip->bip_iter.bi_sector);
690
691         resid = bip->bip_iter.bi_size;
692         while (resid > 0 && sg_miter_next(miter)) {
693
694                 len = min_t(size_t, miter->length, resid);
695                 rc = bio_integrity_add_page(bio, miter->page, len,
696                                             offset_in_page(miter->addr));
697                 if (rc != len) {
698                         pr_err("bio_integrity_add_page() failed; %d\n", rc);
699                         sg_miter_stop(miter);
700                         return -ENOMEM;
701                 }
702
703                 pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
704                           miter->page, len, offset_in_page(miter->addr));
705
706                 resid -= len;
707                 if (len < miter->length)
708                         miter->consumed -= miter->length - len;
709         }
710         sg_miter_stop(miter);
711
712         return 0;
713 }
714
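/*
 * Main READ/WRITE path: map the data scatterlist into as many bios as
 * needed (submitting in batches of IBLOCK_MAX_BIO_PER_TASK), attach PI per
 * bio when protection is enabled, and set REQ_FUA on writes when the device
 * supports it and either the initiator requested FUA or there is no
 * volatile write cache.
 */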
715 static sense_reason_t
716 iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
717                   enum dma_data_direction data_direction)
718 {
719         struct se_device *dev = cmd->se_dev;
720         sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
721         struct iblock_req *ibr;
722         struct bio *bio;
723         struct bio_list list;
724         struct scatterlist *sg;
725         u32 sg_num = sgl_nents;
726         unsigned int opf;
727         unsigned bio_cnt;
728         int i, rc;
729         struct sg_mapping_iter prot_miter;
730         unsigned int miter_dir;
731
732         if (data_direction == DMA_TO_DEVICE) {
733                 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
734                 /*
735                  * Force writethrough using REQ_FUA if a volatile write cache
736                  * is not enabled, or if the initiator set the Force Unit Access bit.
737                  */
738                 opf = REQ_OP_WRITE;
739                 miter_dir = SG_MITER_TO_SG;
740                 if (bdev_fua(ib_dev->ibd_bd)) {
741                         if (cmd->se_cmd_flags & SCF_FUA)
742                                 opf |= REQ_FUA;
743                         else if (!bdev_write_cache(ib_dev->ibd_bd))
744                                 opf |= REQ_FUA;
745                 }
746         } else {
747                 opf = REQ_OP_READ;
748                 miter_dir = SG_MITER_FROM_SG;
749         }
750
751         ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
752         if (!ibr)
753                 goto fail;
754         cmd->priv = ibr;
755
756         if (!sgl_nents) {
757                 refcount_set(&ibr->pending, 1);
758                 iblock_complete_cmd(cmd);
759                 return 0;
760         }
761
762         bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
763         if (!bio)
764                 goto fail_free_ibr;
765
766         bio_list_init(&list);
767         bio_list_add(&list, bio);
768
769         refcount_set(&ibr->pending, 2);
770         bio_cnt = 1;
771
772         if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
773                 sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
774                                miter_dir);
775
776         for_each_sg(sgl, sg, sgl_nents, i) {
777                 /*
778                  * XXX: if the length the device accepts is shorter than the
779                  *      length of the S/G list entry, this will cause an
780                  *      endless loop.  Better hope no driver uses huge pages.
781                  */
782                 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
783                                 != sg->length) {
784                         if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
785                                 rc = iblock_alloc_bip(cmd, bio, &prot_miter);
786                                 if (rc)
787                                         goto fail_put_bios;
788                         }
789
790                         if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
791                                 iblock_submit_bios(&list);
792                                 bio_cnt = 0;
793                         }
794
795                         bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
796                         if (!bio)
797                                 goto fail_put_bios;
798
799                         refcount_inc(&ibr->pending);
800                         bio_list_add(&list, bio);
801                         bio_cnt++;
802                 }
803
804                 /* Always in 512 byte units for Linux/Block */
805                 block_lba += sg->length >> SECTOR_SHIFT;
806                 sg_num--;
807         }
808
809         if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
810                 rc = iblock_alloc_bip(cmd, bio, &prot_miter);
811                 if (rc)
812                         goto fail_put_bios;
813         }
814
815         iblock_submit_bios(&list);
816         iblock_complete_cmd(cmd);
817         return 0;
818
819 fail_put_bios:
820         while ((bio = bio_list_pop(&list)))
821                 bio_put(bio);
822 fail_free_ibr:
823         kfree(ibr);
824 fail:
825         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
826 }
827
828 static sector_t iblock_get_blocks(struct se_device *dev)
829 {
830         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
831         struct block_device *bd = ib_dev->ibd_bd;
832         struct request_queue *q = bdev_get_queue(bd);
833
834         return iblock_emulate_read_cap_with_block_size(dev, bd, q);
835 }
836
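/*
 * Convert the backing device's alignment offset from bytes to logical
 * blocks; returns 0 when the offset is undefined (-1).
 */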
837 static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
838 {
839         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
840         struct block_device *bd = ib_dev->ibd_bd;
841         int ret;
842
843         ret = bdev_alignment_offset(bd);
844         if (ret == -1)
845                 return 0;
846
847         /* convert offset-bytes to offset-lbas */
848         return ret / bdev_logical_block_size(bd);
849 }
850
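/*
 * Logical blocks per physical block exponent: log2(physical block size /
 * logical block size) of the backing device.
 */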
851 static unsigned int iblock_get_lbppbe(struct se_device *dev)
852 {
853         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
854         struct block_device *bd = ib_dev->ibd_bd;
855         unsigned int logs_per_phys =
856                 bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
857
858         return ilog2(logs_per_phys);
859 }
860
861 static unsigned int iblock_get_io_min(struct se_device *dev)
862 {
863         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
864         struct block_device *bd = ib_dev->ibd_bd;
865
866         return bdev_io_min(bd);
867 }
868
869 static unsigned int iblock_get_io_opt(struct se_device *dev)
870 {
871         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
872         struct block_device *bd = ib_dev->ibd_bd;
873
874         return bdev_io_opt(bd);
875 }
876
877 static struct sbc_ops iblock_sbc_ops = {
878         .execute_rw             = iblock_execute_rw,
879         .execute_sync_cache     = iblock_execute_sync_cache,
880         .execute_write_same     = iblock_execute_write_same,
881         .execute_unmap          = iblock_execute_unmap,
882 };
883
884 static sense_reason_t
885 iblock_parse_cdb(struct se_cmd *cmd)
886 {
887         return sbc_parse_cdb(cmd, &iblock_sbc_ops);
888 }
889
890 static bool iblock_get_write_cache(struct se_device *dev)
891 {
892         return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
893 }
894
895 static const struct target_backend_ops iblock_ops = {
896         .name                   = "iblock",
897         .inquiry_prod           = "IBLOCK",
898         .inquiry_rev            = IBLOCK_VERSION,
899         .owner                  = THIS_MODULE,
900         .attach_hba             = iblock_attach_hba,
901         .detach_hba             = iblock_detach_hba,
902         .alloc_device           = iblock_alloc_device,
903         .configure_device       = iblock_configure_device,
904         .destroy_device         = iblock_destroy_device,
905         .free_device            = iblock_free_device,
906         .plug_device            = iblock_plug_device,
907         .unplug_device          = iblock_unplug_device,
908         .parse_cdb              = iblock_parse_cdb,
909         .set_configfs_dev_params = iblock_set_configfs_dev_params,
910         .show_configfs_dev_params = iblock_show_configfs_dev_params,
911         .get_device_type        = sbc_get_device_type,
912         .get_blocks             = iblock_get_blocks,
913         .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
914         .get_lbppbe             = iblock_get_lbppbe,
915         .get_io_min             = iblock_get_io_min,
916         .get_io_opt             = iblock_get_io_opt,
917         .get_write_cache        = iblock_get_write_cache,
918         .tb_dev_attrib_attrs    = sbc_attrib_attrs,
919 };
920
921 static int __init iblock_module_init(void)
922 {
923         return transport_backend_register(&iblock_ops);
924 }
925
926 static void __exit iblock_module_exit(void)
927 {
928         target_backend_unregister(&iblock_ops);
929 }
930
931 MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
932 MODULE_AUTHOR("nab@Linux-iSCSI.org");
933 MODULE_LICENSE("GPL");
934
935 module_init(iblock_module_init);
936 module_exit(iblock_module_exit);