Commit 1802979a authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block updates and fixes from Jens Axboe:

 - NVMe updates and fixes that missed the first pull request. This
   includes bug fixes, and support for autonomous power management.

 - Fix from Christoph for missing clear of the request payload, causing
   a problem with (at least) the storvsc driver.

 - Further fixes for the queue/bdi life time issues from Jan.

 - The Kconfig mq scheduler update from me.

 - Fixing a use-after-free in dm-rq, spotted by Bart, introduced in this
   merge window.

 - Three fixes for nbd from Josef.

 - Bug fix from Omar for the sas transport code, which oopsed when bsg
   ioctls were used.

 - Improvements to the queue restart and tag wait from Omar.

 - Set of fixes for the sed/opal code from Scott.

 - Three trivial patches to cciss from Tobin.

* 'for-linus' of git://git.kernel.dk/linux-block: (41 commits)
  dm-rq: don't dereference request payload after ending request
  blk-mq-sched: separate mark hctx and queue restart operations
  blk-mq: use sbq wait queues instead of restart for driver tags
  block/sed-opal: Propagate original error message to userland.
  nvme/pci: re-check security protocol support after reset
  block/sed-opal: Introduce free_opal_dev to free the structure and clean up state
  nvme: detect NVMe controller in recent MacBooks
  nvme-rdma: add support for host_traddr
  nvmet-rdma: Fix error handling
  nvmet-rdma: use nvme cm status helper
  nvme-rdma: move nvme cm status helper to .h file
  nvme-fc: don't bother to validate ioccsz and iorcsz
  nvme/pci: No special case for queue busy on IO
  nvme/core: Fix race kicking freed request_queue
  nvme/pci: Disable on removal when disconnected
  nvme: Enable autonomous power state transitions
  nvme: Add a quirk mechanism that uses identify_ctrl
  nvme: make nvmf_register_transport require a create_ctrl callback
  nvme: Use CNS as 8-bit field and avoid endianness conversion
  nvme: add semicolon in nvme_command setting
  ...
parents f1ef09fd 61febef4
@@ -69,50 +69,6 @@ config MQ_IOSCHED_DEADLINE
 	---help---
 	  MQ version of the deadline IO scheduler.
 
-config MQ_IOSCHED_NONE
-	bool
-	default y
-
-choice
-	prompt "Default single-queue blk-mq I/O scheduler"
-	default DEFAULT_SQ_NONE
-	help
-	  Select the I/O scheduler which will be used by default for blk-mq
-	  managed block devices with a single queue.
-
-config DEFAULT_SQ_DEADLINE
-	bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y
-
-config DEFAULT_SQ_NONE
-	bool "None"
-
-endchoice
-
-config DEFAULT_SQ_IOSCHED
-	string
-	default "mq-deadline" if DEFAULT_SQ_DEADLINE
-	default "none" if DEFAULT_SQ_NONE
-
-choice
-	prompt "Default multi-queue blk-mq I/O scheduler"
-	default DEFAULT_MQ_NONE
-	help
-	  Select the I/O scheduler which will be used by default for blk-mq
-	  managed block devices with multiple queues.
-
-config DEFAULT_MQ_DEADLINE
-	bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y
-
-config DEFAULT_MQ_NONE
-	bool "None"
-
-endchoice
-
-config DEFAULT_MQ_IOSCHED
-	string
-	default "mq-deadline" if DEFAULT_MQ_DEADLINE
-	default "none" if DEFAULT_MQ_NONE
-
 endmenu
 
 endif
@@ -205,7 +205,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 * needing a restart in that case.
 	 */
 	if (!list_empty(&rq_list)) {
-		blk_mq_sched_mark_restart(hctx);
+		blk_mq_sched_mark_restart_hctx(hctx);
 		did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
 	} else if (!has_sched_dispatch) {
 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
@@ -331,21 +331,17 @@ static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
 {
+	struct request_queue *q = hctx->queue;
 	unsigned int i;
 
-	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
-		blk_mq_sched_restart_hctx(hctx);
-	else {
-		struct request_queue *q = hctx->queue;
-
-		if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
-			return;
-
-		clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-
+	if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
+		if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
 			queue_for_each_hw_ctx(q, hctx, i)
 				blk_mq_sched_restart_hctx(hctx);
+		}
+	} else {
+		blk_mq_sched_restart_hctx(hctx);
 	}
 }
 
 /*
@@ -498,15 +494,6 @@ int blk_mq_sched_init(struct request_queue *q)
 {
 	int ret;
 
-#if defined(CONFIG_DEFAULT_SQ_NONE)
-	if (q->nr_hw_queues == 1)
-		return 0;
-#endif
-#if defined(CONFIG_DEFAULT_MQ_NONE)
-	if (q->nr_hw_queues > 1)
-		return 0;
-#endif
-
 	mutex_lock(&q->sysfs_lock);
 	ret = elevator_init(q, NULL);
 	mutex_unlock(&q->sysfs_lock);
...
@@ -122,17 +122,27 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 	return false;
 }
 
-static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
+/*
+ * Mark a hardware queue as needing a restart.
+ */
+static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
-	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
+	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
 		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-		if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
-			struct request_queue *q = hctx->queue;
+}
 
-			if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
-				set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-		}
-	}
+/*
+ * Mark a hardware queue and the request queue it belongs to as needing a
+ * restart.
+ */
+static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
+{
+	struct request_queue *q = hctx->queue;
+
+	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+	if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
+		set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
 }
 
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
...
@@ -904,6 +904,44 @@ static bool reorder_tags_to_front(struct list_head *list)
 	return first != NULL;
 }
 
+static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
+				void *key)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
+
+	list_del(&wait->task_list);
+	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
+	blk_mq_run_hw_queue(hctx, true);
+	return 1;
+}
+
+static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
+{
+	struct sbq_wait_state *ws;
+
+	/*
+	 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
+	 * The thread which wins the race to grab this bit adds the hardware
+	 * queue to the wait queue.
+	 */
+	if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
+	    test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
+		return false;
+
+	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
+	ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);
+
+	/*
+	 * As soon as this returns, it's no longer safe to fiddle with
+	 * hctx->dispatch_wait, since a completion can wake up the wait queue
+	 * and unlock the bit.
+	 */
+	add_wait_queue(&ws->wait, &hctx->dispatch_wait);
+	return true;
+}
+
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 {
 	struct request_queue *q = hctx->queue;
@@ -931,15 +969,22 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 			continue;
 
 		/*
-		 * We failed getting a driver tag. Mark the queue(s)
-		 * as needing a restart. Retry getting a tag again,
-		 * in case the needed IO completed right before we
-		 * marked the queue as needing a restart.
+		 * The initial allocation attempt failed, so we need to
+		 * rerun the hardware queue when a tag is freed.
 		 */
-		blk_mq_sched_mark_restart(hctx);
-		if (!blk_mq_get_driver_tag(rq, &hctx, false))
-			break;
+		if (blk_mq_dispatch_wait_add(hctx)) {
+			/*
+			 * It's possible that a tag was freed in the
+			 * window between the allocation failure and
+			 * adding the hardware queue to the wait queue.
+			 */
+			if (!blk_mq_get_driver_tag(rq, &hctx, false))
+				break;
+		} else {
+			break;
+		}
 		}
 
 		list_del_init(&rq->queuelist);
 		bd.rq = rq;
@@ -995,10 +1040,11 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 		 *
 		 * blk_mq_run_hw_queue() already checks the STOPPED bit
 		 *
-		 * If RESTART is set, then let completion restart the queue
-		 * instead of potentially looping here.
+		 * If RESTART or TAG_WAITING is set, then let completion restart
+		 * the queue instead of potentially looping here.
 		 */
-		if (!blk_mq_sched_needs_restart(hctx))
+		if (!blk_mq_sched_needs_restart(hctx) &&
+		    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
 			blk_mq_run_hw_queue(hctx, true);
 	}
...
@@ -220,17 +220,24 @@ int elevator_init(struct request_queue *q, char *name)
 	}
 
 	if (!e) {
-		if (q->mq_ops && q->nr_hw_queues == 1)
-			e = elevator_get(CONFIG_DEFAULT_SQ_IOSCHED, false);
-		else if (q->mq_ops)
-			e = elevator_get(CONFIG_DEFAULT_MQ_IOSCHED, false);
-		else
+		/*
+		 * For blk-mq devices, we default to using mq-deadline,
+		 * if available, for single queue devices. If deadline
+		 * isn't available OR we have multiple queues, default
+		 * to "none".
+		 */
+		if (q->mq_ops) {
+			if (q->nr_hw_queues == 1)
+				e = elevator_get("mq-deadline", false);
+			if (!e)
+				return 0;
+		} else
 			e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
 
 		if (!e) {
 			printk(KERN_ERR
 				"Default I/O scheduler not found. " \
-				"Using noop/none.\n");
+				"Using noop.\n");
 			e = elevator_get("noop", false);
 		}
 	}
...
@@ -669,14 +669,14 @@ void del_gendisk(struct gendisk *disk)
 	disk_part_iter_init(&piter, disk,
 			     DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
 	while ((part = disk_part_iter_next(&piter))) {
-		bdev_unhash_inode(MKDEV(disk->major,
-					disk->first_minor + part->partno));
 		invalidate_partition(disk, part->partno);
+		bdev_unhash_inode(part_devt(part));
 		delete_partition(disk, part->partno);
 	}
 	disk_part_iter_exit(&piter);
 
 	invalidate_partition(disk, 0);
+	bdev_unhash_inode(disk_devt(disk));
 	set_capacity(disk, 0);
 	disk->flags &= ~GENHD_FL_UP;
...
@@ -647,8 +647,7 @@ cciss_scsi_setup(ctlr_info_t *h)
 	struct cciss_scsi_adapter_data_t * shba;
 
 	ccissscsi[h->ctlr].ndevices = 0;
-	shba = (struct cciss_scsi_adapter_data_t *)
-		kmalloc(sizeof(*shba), GFP_KERNEL);
+	shba = kmalloc(sizeof(*shba), GFP_KERNEL);
 	if (shba == NULL)
 		return;
 	shba->scsi_host = NULL;
@@ -699,10 +698,8 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
 			ei->SenseLen);
 	scsi_set_resid(cmd, ei->ResidualCnt);
 
-	if(ei->CommandStatus != 0)
-	{  /* an error has occurred */
-		switch(ei->CommandStatus)
-		{
+	if (ei->CommandStatus != 0) { /* an error has occurred */
+		switch (ei->CommandStatus) {
 		case CMD_TARGET_STATUS:
 			/* Pass it up to the upper layers... */
 			if (!ei->ScsiStatus) {
@@ -902,8 +899,7 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
 	ErrorInfo_struct *ei;
 
 	ei = c->err_info;
-	switch(ei->CommandStatus)
-	{
+	switch (ei->CommandStatus) {
 	case CMD_TARGET_STATUS:
 		dev_warn(&h->pdev->dev,
 			"cmd %p has completed with errors\n", c);
@@ -1182,8 +1178,7 @@ cciss_update_non_disk_devices(ctlr_info_t *h, int hostno)
 		cciss_scsi_get_device_id(h, scsi3addr,
 			this_device->device_id, sizeof(this_device->device_id));
 
-		switch (this_device->devtype)
-		{
+		switch (this_device->devtype) {
 		case 0x05: /* CD-ROM */ {
 
 			/* We don't *really* support actual CD-ROM devices,
@@ -1414,8 +1409,7 @@ cciss_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmn
 	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
 	c->Request.Type.Type = TYPE_CMD;
 	c->Request.Type.Attribute = ATTR_SIMPLE;
-	switch(cmd->sc_data_direction)
-	{
+	switch (cmd->sc_data_direction) {
 	case DMA_TO_DEVICE:
 		c->Request.Type.Direction = XFER_WRITE;
 		break;
...
@@ -96,6 +96,10 @@ static int max_part;
 static struct workqueue_struct *recv_workqueue;
 static int part_shift;
 
+static int nbd_dev_dbg_init(struct nbd_device *nbd);
+static void nbd_dev_dbg_close(struct nbd_device *nbd);
+
 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {
 	return disk_to_dev(nbd->disk);
@@ -120,7 +124,7 @@ static const char *nbdcmd_to_ascii(int cmd)
 
 static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
 {
-	bdev->bd_inode->i_size = 0;
+	bd_set_size(bdev, 0);
 	set_capacity(nbd->disk, 0);
 	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
@@ -129,29 +133,20 @@ static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
 
 static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
 {
-	if (!nbd_is_connected(nbd))
-		return;
-
-	bdev->bd_inode->i_size = nbd->bytesize;
+	blk_queue_logical_block_size(nbd->disk->queue, nbd->blksize);
+	blk_queue_physical_block_size(nbd->disk->queue, nbd->blksize);
+	bd_set_size(bdev, nbd->bytesize);
 	set_capacity(nbd->disk, nbd->bytesize >> 9);
 	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
 }
 
-static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
+static void nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
 			loff_t blocksize, loff_t nr_blocks)
 {
-	int ret;
-
-	ret = set_blocksize(bdev, blocksize);
-	if (ret)
-		return ret;
-
 	nbd->blksize = blocksize;
 	nbd->bytesize = blocksize * nr_blocks;
-
-	nbd_size_update(nbd, bdev);
-
-	return 0;
+	if (nbd_is_connected(nbd))
+		nbd_size_update(nbd, bdev);
 }
 
 static void nbd_end_request(struct nbd_cmd *cmd)
@@ -571,10 +566,17 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
-static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
+static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
+			  unsigned long arg)
 {
+	struct socket *sock;
 	struct nbd_sock **socks;
 	struct nbd_sock *nsock;
+	int err;
+
+	sock = sockfd_lookup(arg, &err);
+	if (!sock)
+		return err;
 
 	if (!nbd->task_setup)
 		nbd->task_setup = current;
@@ -598,26 +600,20 @@ static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
 	nsock->sock = sock;
 	socks[nbd->num_connections++] = nsock;
 
+	if (max_part)
+		bdev->bd_invalidated = 1;
 	return 0;
 }
 
 /* Reset all properties of an NBD device */
 static void nbd_reset(struct nbd_device *nbd)
 {
-	int i;
-
-	for (i = 0; i < nbd->num_connections; i++)
-		kfree(nbd->socks[i]);
-	kfree(nbd->socks);
-	nbd->socks = NULL;
-
 	nbd->runtime_flags = 0;
 	nbd->blksize = 1024;
 	nbd->bytesize = 0;
 	set_capacity(nbd->disk, 0);
 	nbd->flags = 0;
 	nbd->tag_set.timeout = 0;
-	nbd->num_connections = 0;
-	nbd->task_setup = NULL;
 	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 }
@@ -659,15 +655,8 @@ static void send_disconnects(struct nbd_device *nbd)
 	}
 }
 
-static int nbd_dev_dbg_init(struct nbd_device *nbd);
-static void nbd_dev_dbg_close(struct nbd_device *nbd);
-
-/* Must be called with config_lock held */
-static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
-		       unsigned int cmd, unsigned long arg)
+static int nbd_disconnect(struct nbd_device *nbd, struct block_device *bdev)
 {
-	switch (cmd) {
-	case NBD_DISCONNECT: {
 	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
 	if (!nbd->socks)
 		return -EINVAL;
@@ -684,9 +673,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 			&nbd->runtime_flags))
 		send_disconnects(nbd);
 	return 0;
 }
 
-	case NBD_CLEAR_SOCK:
+static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
+{
 	sock_shutdown(nbd);
 	nbd_clear_que(nbd);
 	kill_bdev(bdev);
@@ -695,7 +685,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 	 * We want to give the run thread a chance to wait for everybody
 	 * to clean up and then do it's own cleanup.
 	 */
-	if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
+	if (!test_bit(NBD_RUNNING, &nbd->runtime_flags) &&
+	    nbd->num_connections) {
 		int i;
 
 		for (i = 0; i < nbd->num_connections; i++)
@@ -703,46 +694,14 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 			kfree(nbd->socks);
 			nbd->socks = NULL;
 			nbd->num_connections = 0;
-			nbd->task_setup = NULL;
 	}
-		return 0;
-
-	case NBD_SET_SOCK: {
-		int err;
-		struct socket *sock = sockfd_lookup(arg, &err);
-
-		if (!sock)
-			return err;
-
-		err = nbd_add_socket(nbd, sock);
-		if (!err && max_part)
-			bdev->bd_invalidated = 1;
-
-		return err;
-	}
-	case NBD_SET_BLKSIZE: {
-		loff_t bsize = div_s64(nbd->bytesize, arg);
-
-		return nbd_size_set(nbd, bdev, arg, bsize);
-	}
-	case NBD_SET_SIZE:
-		return nbd_size_set(nbd, bdev, nbd->blksize,
-					div_s64(arg, nbd->blksize));
-	case NBD_SET_SIZE_BLOCKS:
-		return nbd_size_set(nbd, bdev, nbd->blksize, arg);
-	case NBD_SET_TIMEOUT:
-		nbd->tag_set.timeout = arg * HZ;
-		return 0;
-
-	case NBD_SET_FLAGS:
-		nbd->flags = arg;
-		return 0;
+	nbd->task_setup = NULL;
+	return 0;
+}
 
-	case NBD_DO_IT: {
+static int nbd_start_device(struct nbd_device *nbd, struct block_device *bdev)
+{
 	struct recv_thread_args *args;
 	int num_connections = nbd->num_connections;
 	int error = 0, i;
@@ -798,10 +757,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 	mutex_lock(&nbd->config_lock);
 	nbd->task_recv = NULL;
 out_err:
-	sock_shutdown(nbd);
-	nbd_clear_que(nbd);
-	kill_bdev(bdev);
-	nbd_bdev_reset(bdev);
+	clear_bit(NBD_RUNNING, &nbd->runtime_flags);
+	nbd_clear_sock(nbd, bdev);
 
 	/* user requested, ignore socket errors */
 	if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
@@ -811,15 +768,45 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 	nbd_reset(nbd);
 	return error;
 }
 
+/* Must be called with config_lock held */
+static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
+		       unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case NBD_DISCONNECT:
+		return nbd_disconnect(nbd, bdev);
+	case NBD_CLEAR_SOCK:
+		return nbd_clear_sock(nbd, bdev);
+	case NBD_SET_SOCK:
+		return nbd_add_socket(nbd, bdev, arg);
+	case NBD_SET_BLKSIZE:
+		nbd_size_set(nbd, bdev, arg,
+			     div_s64(nbd->bytesize, arg));
+		return 0;
+	case NBD_SET_SIZE:
+		nbd_size_set(nbd, bdev, nbd->blksize,
+			     div_s64(arg, nbd->blksize));
+		return 0;
+	case NBD_SET_SIZE_BLOCKS:
+		nbd_size_set(nbd, bdev, nbd->blksize, arg);
+		return 0;
+	case NBD_SET_TIMEOUT:
+		nbd->tag_set.timeout = arg * HZ;
+		return 0;
+	case NBD_SET_FLAGS:
+		nbd->flags = arg;
+		return 0;
+	case NBD_DO_IT:
+		return nbd_start_device(nbd, bdev);
 	case NBD_CLEAR_QUE:
 		/*
 		 * This is for compatibility only. The queue is always cleared
 		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
 		 */
 		return 0;
 	case NBD_PRINT_DEBUG:
 		/*
 		 * For compatibility only, we no longer keep a list of
@@ -1134,8 +1121,10 @@ static int __init nbd_init(void)
 	if (!recv_workqueue)
 		return -ENOMEM;
 
-	if (register_blkdev(NBD_MAJOR, "nbd"))
+	if (register_blkdev(NBD_MAJOR, "nbd")) {
+		destroy_workqueue(recv_workqueue);
 		return -EIO;
+	}
 
 	nbd_dbg_init();
...
@@ -328,13 +328,15 @@ static void dm_softirq_done(struct request *rq)
 	int rw;
 
 	if (!clone) {
-		rq_end_stats(tio->md, rq);
+		struct mapped_device *md = tio->md;
+
+		rq_end_stats(md, rq);
 		rw = rq_data_dir(rq);
 		if (!rq->q->mq_ops)
 			blk_end_request_all(rq, tio->error);
 		else
 			blk_mq_end_request(rq, tio->error);
-		rq_completed(tio->md, rw, false);
+		rq_completed(md, rw, false);
 		return;
 	}
...
@@ -480,11 +480,16 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
  * being implemented to the common NVMe fabrics library. Part of
  * the overall init sequence of starting up a fabrics driver.
  */
-void nvmf_register_transport(struct nvmf_transport_ops *ops)
+int nvmf_register_transport(struct nvmf_transport_ops *ops)
 {
+	if (!ops->create_ctrl)
+		return -EINVAL;
+
 	mutex_lock(&nvmf_transports_mutex);
 	list_add_tail(&ops->entry, &nvmf_transports);
 	mutex_unlock(&nvmf_transports_mutex);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(nvmf_register_transport);
...
@@ -128,7 +128,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
 int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
-void nvmf_register_transport(struct nvmf_transport_ops *ops);
+int nvmf_register_transport(struct nvmf_transport_ops *ops);
 void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl);
...
@@ -2353,18 +2353,6 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	/* sanity checks */
 
-	/* FC-NVME supports 64-byte SQE only */
-	if (ctrl->ctrl.ioccsz != 4) {
-		dev_err(ctrl->ctrl.device, "ioccsz %d is not supported!\n",
-				ctrl->ctrl.ioccsz);
-		goto out_remove_admin_queue;
-	}
-	/* FC-NVME supports 16-byte CQE only */
-	if (ctrl->ctrl.iorcsz != 1) {
-		dev_err(ctrl->ctrl.device, "iorcsz %d is not supported!\n",
-				ctrl->ctrl.iorcsz);
-		goto out_remove_admin_queue;
-	}
-
 	/* FC-NVME does not have other data in the capsule */
 	if (ctrl->ctrl.icdoff) {
 		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
@@ -2562,8 +2550,7 @@ static int __init nvme_fc_init_module(void)
 	if (!nvme_fc_wq)
 		return -ENOMEM;
 
-	nvmf_register_transport(&nvme_fc_transport);
-	return 0;
+	return nvmf_register_transport(&nvme_fc_transport);
 }
 
 static void __exit nvme_fc_exit_module(void)
...
@@ -78,6 +78,11 @@ enum nvme_quirks {
 	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
 	 */
 	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),
+
+	/*
+	 * APST should not be used.
+	 */
+	NVME_QUIRK_NO_APST			= (1 << 4),
 };
 
 /*
@@ -112,6 +117,7 @@ enum nvme_ctrl_state {
 
 struct nvme_ctrl {
 	enum nvme_ctrl_state state;
+	bool identified;
 	spinlock_t lock;
 	const struct nvme_ctrl_ops *ops;
 	struct request_queue *admin_q;
@@ -147,13 +153,19 @@ struct nvme_ctrl {
 	u32 vs;
 	u32 sgls;
 	u16 kas;
+	u8 npss;
+	u8 apsta;
 	unsigned int kato;
 	bool subsystem;
 	unsigned long quirks;
+	struct nvme_id_power_state psd[32];
 	struct work_struct scan_work;
 	struct work_struct async_event_work;
 	struct delayed_work ka_work;
 
+	/* Power saving configuration */
+	u64 ps_max_latency_us;
+
 	/* Fabrics only */
 	u16 sqsize;
 	u32 ioccsz;
...
@@ -613,9 +613,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	spin_lock_irq(&nvmeq->q_lock);
 	if (unlikely(nvmeq->cq_vector < 0)) {
-		if (ns && !test_bit(NVME_NS_DEAD, &ns->flags))
-			ret = BLK_MQ_RQ_QUEUE_BUSY;
-		else
-			ret = BLK_MQ_RQ_QUEUE_ERROR;
+		ret = BLK_MQ_RQ_QUEUE_ERROR;
 		spin_unlock_irq(&nvmeq->q_lock);
 		goto out_cleanup_iod;
 	}
@@ -1739,7 +1736,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
 	if (dev->ctrl.admin_q)
 		blk_put_queue(dev->ctrl.admin_q);
 	kfree(dev->queues);
-	kfree(dev->ctrl.opal_dev);
+	free_opal_dev(dev->ctrl.opal_dev);
 	kfree(dev);
 }
 
@@ -1789,13 +1786,16 @@ static void nvme_reset_work(struct work_struct *work)
 	if (result)
 		goto out;
 
-	if ((dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) && !dev->ctrl.opal_dev) {
+	if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
+		if (!dev->ctrl.opal_dev)
 			dev->ctrl.opal_dev =
 				init_opal_dev(&dev->ctrl, &nvme_sec_submit);
-	} else if (was_suspend)
+
+		if (was_suspend)
 			opal_unlock_from_suspend(dev->ctrl.opal_dev);
+	} else {
+		free_opal_dev(dev->ctrl.opal_dev);
+		dev->ctrl.opal_dev = NULL;
+	}
 
 	result = nvme_setup_io_queues(dev);
 	if (result)
@@ -2001,8 +2001,10 @@ static void nvme_remove(struct pci_dev *pdev)
 
 	pci_set_drvdata(pdev, NULL);
 
-	if (!pci_device_is_present(pdev))
+	if (!pci_device_is_present(pdev)) {
 		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+		nvme_dev_disable(dev, false);
+	}
 
 	flush_work(&dev->reset_work);
 	nvme_uninit_ctrl(&dev->ctrl);
@@ -2121,6 +2123,7 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, nvme_id_table);
...
@@ -42,28 +42,6 @@
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS	1
 
-static const char *const nvme_rdma_cm_status_strs[] = {
-	[NVME_RDMA_CM_INVALID_LEN]	= "invalid length",
-	[NVME_RDMA_CM_INVALID_RECFMT]	= "invalid record format",
-	[NVME_RDMA_CM_INVALID_QID]	= "invalid queue ID",
-	[NVME_RDMA_CM_INVALID_HSQSIZE]	= "invalid host SQ size",
-	[NVME_RDMA_CM_INVALID_HRQSIZE]	= "invalid host RQ size",
-	[NVME_RDMA_CM_NO_RSC]		= "resource not found",
-	[NVME_RDMA_CM_INVALID_IRD]	= "invalid IRD",
-	[NVME_RDMA_CM_INVALID_ORD]	= "Invalid ORD",
-};
-
-static const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
-{
-	size_t index = status;
-
-	if (index < ARRAY_SIZE(nvme_rdma_cm_status_strs) &&
-	    nvme_rdma_cm_status_strs[index])
-		return nvme_rdma_cm_status_strs[index];
-	else
-		return "unrecognized reason";
-};
-
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -155,6 +133,10 @@ struct nvme_rdma_ctrl {
 		struct sockaddr addr;
 		struct sockaddr_in addr_in;
 	};
+	union {
+		struct sockaddr src_addr;
+		struct sockaddr_in src_addr_in;
+	};
 
 	struct nvme_ctrl	ctrl;
 };
@@ -567,6 +549,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 		int idx, size_t queue_size)
 {
 	struct nvme_rdma_queue *queue;
+	struct sockaddr *src_addr = NULL;
 	int ret;
 
 	queue = &ctrl->queues[idx];
@@ -589,7 +572,10 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 	}
 
 	queue->cm_error = -ETIMEDOUT;
-	ret = rdma_resolve_addr(queue->cm_id, NULL, &ctrl->addr,
+	if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
+		src_addr = &ctrl->src_addr;
+
+	ret = rdma_resolve_addr(queue->cm_id, src_addr, &ctrl->addr,
 			NVME_RDMA_CONNECT_TIMEOUT_MS);
 	if (ret) {
 		dev_info(ctrl->ctrl.device,
@@ -1905,6 +1891,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		goto out_free_ctrl;
 	}
 
+	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
+		ret = nvme_rdma_parse_ipaddr(&ctrl->src_addr_in,
+				opts->host_traddr);
+		if (ret) {
+			pr_err("malformed src IP address passed: %s\n",
+			       opts->host_traddr);
+			goto out_free_ctrl;
+		}
+	}
+
 	if (opts->mask & NVMF_OPT_TRSVCID) {
 		u16 port;
 
@@ -2016,7 +2012,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 static struct nvmf_transport_ops nvme_rdma_transport = {
 	.name		= "rdma",
 	.required_opts	= NVMF_OPT_TRADDR,
-	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY,
+	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
+			  NVMF_OPT_HOST_TRADDR,
 	.create_ctrl	= nvme_rdma_create_ctrl,
 };
@@ -2063,8 +2060,7 @@ static int __init nvme_rdma_init_module(void)
 		return ret;
 	}
 
-	nvmf_register_transport(&nvme_rdma_transport);
-	return 0;
+	return nvmf_register_transport(&nvme_rdma_transport);
 }
 
 static void __exit nvme_rdma_cleanup_module(void)
...
@@ -41,7 +41,7 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
 	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
 	if (!ns) {
 		status = NVME_SC_INVALID_NS;
-		pr_err("nvmet : Counld not find namespace id : %d\n",
+		pr_err("nvmet : Could not find namespace id : %d\n",
 				le32_to_cpu(req->cmd->get_log_page.nsid));
 		goto out;
 	}
@@ -509,7 +509,7 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
 		break;
 	case nvme_admin_identify:
 		req->data_len = 4096;
-		switch (le32_to_cpu(cmd->identify.cns)) {
+		switch (cmd->identify.cns) {
 		case NVME_ID_CNS_NS:
 			req->execute = nvmet_execute_identify_ns;
 			return 0;
...
@@ -17,6 +17,7 @@
 #include "nvmet.h"
 
 static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
+static DEFINE_IDA(cntlid_ida);
 
 /*
  * This read/write semaphore is used to synchronize access to configuration
@@ -749,7 +750,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (!ctrl->sqs)
 		goto out_free_cqs;
 
-	ret = ida_simple_get(&subsys->cntlid_ida,
+	ret = ida_simple_get(&cntlid_ida,
 			     NVME_CNTLID_MIN, NVME_CNTLID_MAX,
 			     GFP_KERNEL);
 	if (ret < 0) {
@@ -819,7 +820,7 @@ static void nvmet_ctrl_free(struct kref *ref)
 	flush_work(&ctrl->async_event_work);
 	cancel_work_sync(&ctrl->fatal_err_work);
 
-	ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
+	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
 	nvmet_subsys_put(subsys);
 
 	kfree(ctrl->sqs);
@@ -918,9 +919,6 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 	mutex_init(&subsys->lock);
 	INIT_LIST_HEAD(&subsys->namespaces);
 	INIT_LIST_HEAD(&subsys->ctrls);
-
-	ida_init(&subsys->cntlid_ida);
-
 	INIT_LIST_HEAD(&subsys->hosts);
 
 	return subsys;
@@ -933,7 +931,6 @@ static void nvmet_subsys_free(struct kref *ref)
 
 	WARN_ON_ONCE(!list_empty(&subsys->namespaces));
 
-	ida_destroy(&subsys->cntlid_ida);
 	kfree(subsys->subsysnqn);
 	kfree(subsys);
 }
@@ -976,6 +973,7 @@ static void __exit nvmet_exit(void)
 {
 	nvmet_exit_configfs();
 	nvmet_exit_discovery();
+	ida_destroy(&cntlid_ida);
 
 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
...
@@ -186,14 +186,14 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
 	}
 	case nvme_admin_identify:
 		req->data_len = 4096;
-		switch (le32_to_cpu(cmd->identify.cns)) {
+		switch (cmd->identify.cns) {
 		case NVME_ID_CNS_CTRL:
 			req->execute =
 				nvmet_execute_identify_disc_ctrl;
 			return 0;
 		default:
 			pr_err("nvmet: unsupported identify cns %d\n",
-				le32_to_cpu(cmd->identify.cns));
+				cmd->identify.cns);
 			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 		}
 	default:
...
@@ -153,8 +153,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		goto out;
 	}
 
-	pr_info("creating controller %d for NQN %s.\n",
-		ctrl->cntlid, ctrl->hostnqn);
+	pr_info("creating controller %d for subsystem %s for NQN %s.\n",
+		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
 	req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
 
 out:
@@ -220,7 +220,7 @@ int nvmet_parse_connect_cmd(struct nvmet_req *req)
 
 	req->ns = NULL;
 
-	if (req->cmd->common.opcode != nvme_fabrics_command) {
+	if (cmd->common.opcode != nvme_fabrics_command) {
 		pr_err("invalid command 0x%x on unconnected queue.\n",
 			cmd->fabrics.opcode);
 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
...
@@ -1817,16 +1817,14 @@ nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
 		/* data no longer needed */
 		nvmet_fc_free_tgt_pgs(fod);
 
-		if (fcpreq->fcp_error || abort)
-			nvmet_req_complete(&fod->req, fcpreq->fcp_error);
-
+		nvmet_req_complete(&fod->req, fcpreq->fcp_error);
 		return;
 	}
 
 	switch (fcpreq->op) {
 	case NVMET_FCOP_WRITEDATA:
-		if (abort || fcpreq->fcp_error ||
+		if (fcpreq->fcp_error ||
 		    fcpreq->transferred_length != fcpreq->transfer_length) {
 			nvmet_req_complete(&fod->req,
 				NVME_SC_FC_TRANSPORT_ERROR);
@@ -1849,7 +1847,7 @@ nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
 
 	case NVMET_FCOP_READDATA:
 	case NVMET_FCOP_READDATA_RSP:
-		if (abort || fcpreq->fcp_error ||
+		if (fcpreq->fcp_error ||
 		    fcpreq->transferred_length != fcpreq->transfer_length) {
 			/* data no longer needed */
 			nvmet_fc_free_tgt_pgs(fod);
...
@@ -724,8 +724,7 @@ static int __init nvme_loop_init_module(void)
 	ret = nvmet_register_transport(&nvme_loop_ops);
 	if (ret)
 		return ret;
-	nvmf_register_transport(&nvme_loop_transport);
-	return 0;
+	return nvmf_register_transport(&nvme_loop_transport);
 }
 
 static void __exit nvme_loop_cleanup_module(void)
...
@@ -142,7 +142,6 @@ struct nvmet_subsys {
 	unsigned int		max_nsid;
 
 	struct list_head	ctrls;
-	struct ida		cntlid_ida;
 
 	struct list_head	hosts;
 	bool			allow_any_host;
...
@@ -1041,6 +1041,9 @@ static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
 {
 	struct nvme_rdma_cm_rej rej;
 
+	pr_debug("rejecting connect request: status %d (%s)\n",
+		 status, nvme_rdma_cm_msg(status));
+
 	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
 	rej.sts = cpu_to_le16(status);
@@ -1091,7 +1094,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
 	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
 	if (queue->idx < 0) {
 		ret = NVME_RDMA_CM_NO_RSC;
-		goto out_free_queue;
+		goto out_destroy_sq;
 	}
 
 	ret = nvmet_rdma_alloc_rsps(queue);
@@ -1135,7 +1138,6 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
 out_free_queue:
 	kfree(queue);
 out_reject:
-	pr_debug("rejecting connect request with status code %d\n", ret);
 	nvmet_rdma_cm_reject(cm_id, ret);
 	return NULL;
 }
@@ -1188,7 +1190,6 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
 	ndev = nvmet_rdma_find_get_device(cm_id);
 	if (!ndev) {
-		pr_err("no client data!\n");
 		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
 		return -ECONNREFUSED;
 	}
...
@@ -1167,7 +1167,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 
 	/* zero out the cmd, except for the embedded scsi_request */
 	memset((char *)cmd + sizeof(cmd->req), 0,
-		sizeof(*cmd) - sizeof(cmd->req));
+		sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size);
 
 	cmd->device = dev;
 	cmd->sense_buffer = buf;
...
@@ -227,27 +227,31 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 		return 0;
 	}
 
+	q = blk_alloc_queue(GFP_KERNEL);
+	if (!q)
+		return -ENOMEM;
+	q->cmd_size = sizeof(struct scsi_request);
+
 	if (rphy) {
-		q = blk_init_queue(sas_non_host_smp_request, NULL);
+		q->request_fn = sas_non_host_smp_request;
 		dev = &rphy->dev;
 		name = dev_name(dev);
 		release = NULL;
 	} else {
-		q = blk_init_queue(sas_host_smp_request, NULL);
+		q->request_fn = sas_host_smp_request;
 		dev = &shost->shost_gendev;
 		snprintf(namebuf, sizeof(namebuf),
 			 "sas_host%d", shost->host_no);
 		name = namebuf;
 		release = sas_host_release;
 	}
-	if (!q)
-		return -ENOMEM;
+	error = blk_init_allocated_queue(q);
+	if (error)
+		goto out_cleanup_queue;
 
 	error = bsg_register_queue(q, dev, name, release);
-	if (error) {
-		blk_cleanup_queue(q);
-		return -ENOMEM;
-	}
+	if (error)
+		goto out_cleanup_queue;
 
 	if (rphy)
 		rphy->q = q;
@@ -261,6 +265,10 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
 	return 0;
+
+out_cleanup_queue:
+	blk_cleanup_queue(q);
+	return error;
 }
 
 static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
...
@@ -1043,13 +1043,22 @@ static struct block_device *bd_acquire(struct inode *inode)
 
 	spin_lock(&bdev_lock);
 	bdev = inode->i_bdev;
-	if (bdev) {
+	if (bdev && !inode_unhashed(bdev->bd_inode)) {
 		bdgrab(bdev);
 		spin_unlock(&bdev_lock);
 		return bdev;
 	}
 	spin_unlock(&bdev_lock);
 
+	/*
+	 * i_bdev references block device inode that was already shut down
+	 * (corresponding device got removed). Remove the reference and look
+	 * up block device inode again just in case new device got
+	 * reestablished under the same device number.
+	 */
+	if (bdev)
+		bd_forget(inode);
+
 	bdev = bdget(inode->i_rdev);
 	if (bdev) {
 		spin_lock(&bdev_lock);
...
@@ -33,6 +33,7 @@ struct blk_mq_hw_ctx {
 	struct blk_mq_ctx	**ctxs;
 	unsigned int		nr_ctx;
 
+	wait_queue_t		dispatch_wait;
 	atomic_t		wait_index;
 
 	struct blk_mq_tags	*tags;
@@ -160,6 +161,7 @@ enum {
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_S_TAG_ACTIVE	= 1,
 	BLK_MQ_S_SCHED_RESTART	= 2,
+	BLK_MQ_S_TAG_WAITING	= 3,
 
 	BLK_MQ_MAX_DEPTH	= 10240,
...
@@ -29,6 +29,30 @@ enum nvme_rdma_cm_status {
 	NVME_RDMA_CM_INVALID_ORD	= 0x08,
 };
 
+static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
+{
+	switch (status) {
+	case NVME_RDMA_CM_INVALID_LEN:
+		return "invalid length";
+	case NVME_RDMA_CM_INVALID_RECFMT:
+		return "invalid record format";
+	case NVME_RDMA_CM_INVALID_QID:
+		return "invalid queue ID";
+	case NVME_RDMA_CM_INVALID_HSQSIZE:
+		return "invalid host SQ size";
+	case NVME_RDMA_CM_INVALID_HRQSIZE:
+		return "invalid host RQ size";
+	case NVME_RDMA_CM_NO_RSC:
+		return "resource not found";
+	case NVME_RDMA_CM_INVALID_IRD:
+		return "invalid IRD";
+	case NVME_RDMA_CM_INVALID_ORD:
+		return "Invalid ORD";
+	default:
+		return "unrecognized reason";
+	}
+}
+
 /**
  * struct nvme_rdma_cm_req - rdma connect request
  *
...
@@ -579,6 +579,12 @@ struct nvme_write_zeroes_cmd {
 	__le16			appmask;
 };
 
+/* Features */
+
+struct nvme_feat_auto_pst {
+	__le64 entries[32];
+};
+
 /* Admin commands */
 
 enum nvme_admin_opcode {
@@ -644,7 +650,9 @@ struct nvme_identify {
 	__le32			nsid;
 	__u64			rsvd2[2];
 	union nvme_data_ptr	dptr;
-	__le32			cns;
+	__u8			cns;
+	__u8			rsvd3;
+	__le16			ctrlid;
 	__u32			rsvd11[5];
 };
...
@@ -27,6 +27,7 @@ typedef int (sec_send_recv)(void *data, u16 spsp, u8 secp, void *buffer,
 		size_t len, bool send);
 
 #ifdef CONFIG_BLK_SED_OPAL
+void free_opal_dev(struct opal_dev *dev);
 bool opal_unlock_from_suspend(struct opal_dev *dev);
 struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv);
 int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr);
@@ -51,6 +52,10 @@ static inline bool is_sed_ioctl(unsigned int cmd)
 	return false;
 }
 #else
+static inline void free_opal_dev(struct opal_dev *dev)
+{
+}
+
 static inline bool is_sed_ioctl(unsigned int cmd)
 {
 	return false;
...