/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine  <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

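/*
 * Claim the backing struct block_device named by udev_path, create the
 * per-device bioset, and copy the block layer queue limits (block size,
 * max sectors, discard and integrity capabilities) into se_dev_attrib.
 */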
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check whether the underlying struct block_device request_queue
	 * supports the QUEUE_FLAG_DISCARD bit (UNMAP/WRITE_SAME in SCSI,
	 * TRIM in ATA), in which case we need to report TPE=1 and export
	 * the discard limits.
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;

		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	/*
	 * Enable WRITE_SAME emulation for IBLOCK and cap the length at
	 * 0xFFFF, since the smaller WRITE_SAME(10) CDB only has a two-byte
	 * block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = ib_dev->ibd_bio_set;

		if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);

	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

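/*
 * Rescale the bdev capacity for READ CAPACITY emulation: the value is
 * derived from i_size in units of the bdev's logical block size and then
 * shifted to match the block size exported to the initiator (e.g. a
 * 512-byte-block device exported with a 4096-byte block_size yields 1/8
 * of the count).
 */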
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

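/*
 * Completion bookkeeping shared by all bios of one se_cmd: ibr->pending
 * counts outstanding bios, and the last one to finish reports GOOD or
 * CHECK CONDITION (if any bio failed) back to the target core.
 */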
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed-in err is still 0
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

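/*
 * Allocate a bio from the per-device bioset, capped at BIO_MAX_PAGES
 * vector entries, and point it at the claimed block_device with
 * iblock_bio_done() as its completion callback.
 */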
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}

static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle LBA ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

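/*
 * Back SCSI UNMAP with blkdev_issue_discard() on the claimed bdev.
 */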
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	int ret;

	ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

static sense_reason_t
iblock_execute_write_same_unmap(struct se_cmd *cmd)
{
	sector_t lba = cmd->t_task_lba;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	ret = iblock_execute_unmap(cmd, lba, nolb);
	if (ret)
		return ret;

	target_complete_cmd(cmd, GOOD);
	return 0;
}

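/*
 * Emulate WRITE_SAME by building bios that repeat the single-block
 * payload scatterlist entry once per LBA in the range.
 */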
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	sector_t sectors = sbc_get_write_same_sectors(cmd);

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

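/*
 * Parse configfs control parameters: udev_path= names the backing block
 * device (required before device configuration), readonly= drops
 * FMODE_WRITE at claim time, and force= is accepted but has no effect
 * here.
 */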
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

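/*
 * Attach a bio_integrity_payload carrying the command's protection
 * information scatterlist (t_prot_sg) to the first bio of a T10-PI
 * protected transfer.
 */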
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct scatterlist *sg;
	int i, rc;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
	if (!bip) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return -ENOMEM;
	}

	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
			 dev->prot_length;
	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {

		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
					    sg->offset);
		if (rc != sg->length) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
			 sg_page(sg), sg->length, sg->offset);
	}

	return 0;
}

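/*
 * Main READ/WRITE path: translate the command's LBA into 512-byte block
 * layer sectors, map the data scatterlist into one or more bios (at most
 * IBLOCK_MAX_BIO_PER_TASK in flight per submission), attach protection
 * information when enabled, and submit them under a plug.
 */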
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio, *bio_start;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw = 0;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using WRITE_FUA if a volatile write cache
		 * is not enabled, or if the initiator set the Force Unit
		 * Access bit.
		 */
		if (q->flush_flags & REQ_FUA) {
			if (cmd->se_cmd_flags & SCF_FUA)
				rw = WRITE_FUA;
			else if (!(q->flush_flags & REQ_FLUSH))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else {
			rw = WRITE;
		}
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->dev_attrib.block_size);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_start = bio;
	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		int rc = iblock_alloc_bip(cmd, bio_start);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

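/*
 * LBPPBE: logical blocks per physical block exponent reported in
 * READ CAPACITY(16), i.e. log2(physical block size / logical block size).
 */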
static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_write_same_unmap = iblock_execute_write_same_unmap,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return q->flush_flags & REQ_FLUSH;
}

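/*
 * Backend ops vector that wires IBLOCK into the generic target core;
 * registered from iblock_module_init() below.
 */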
static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);