Commit 3e8072d4 authored by Linus Torvalds

Merge git://git.infradead.org/users/willy/linux-nvme

Pull NVMe driver updates from Matthew Wilcox:
 "Various updates to the NVMe driver.  The most user-visible change is
  that drive hotplugging now works and CPU hotplug while an NVMe drive
  is installed should also work better"

* git://git.infradead.org/users/willy/linux-nvme:
  NVMe: Retry failed commands with non-fatal errors
  NVMe: Add getgeo to block ops
  NVMe: Start-stop nvme_thread during device add-remove.
  NVMe: Make I/O timeout a module parameter
  NVMe: CPU hot plug notification
  NVMe: per-cpu io queues
  NVMe: Replace DEFINE_PCI_DEVICE_TABLE
  NVMe: Fix divide-by-zero in nvme_trans_io_get_num_cmds
  NVMe: IOCTL path RCU protect queue access
  NVMe: RCU protected access to io queues
  NVMe: Initialize device reference count earlier
  NVMe: Add CONFIG_PM_SLEEP to suspend/resume functions
parents a63b747b edd10d33
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1,6 +1,6 @@
 /*
  * NVM Express device driver
- * Copyright (c) 2011, Intel Corporation.
+ * Copyright (c) 2011-2014, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -20,10 +20,12 @@
 #include <linux/bio.h>
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
+#include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/genhd.h>
+#include <linux/hdreg.h>
 #include <linux/idr.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -35,6 +37,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
+#include <linux/percpu.h>
 #include <linux/poison.h>
 #include <linux/ptrace.h>
 #include <linux/sched.h>
@@ -47,6 +50,11 @@
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT	(60 * HZ)
+#define IOD_TIMEOUT	(4 * NVME_IO_TIMEOUT)
+
+unsigned char io_timeout = 30;
+module_param(io_timeout, byte, 0644);
+MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
 static int nvme_major;
 module_param(nvme_major, int, 0);
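[Annotation: the new io_timeout parameter above is created with mode 0644, so it can be read and changed at runtime through /sys/module/nvme/parameters/io_timeout (e.g. "echo 60 > /sys/module/nvme/parameters/io_timeout"). A minimal sketch of the same module_param() pattern under a hypothetical "demo" module, not driver code:]

/* Sketch only: the module-parameter pattern used above; "demo" names
 * are hypothetical. */
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned char demo_timeout = 30;		/* seconds, same default as io_timeout */
module_param(demo_timeout, byte, 0644);		/* 0644: root may rewrite it via sysfs */
MODULE_PARM_DESC(demo_timeout, "timeout in seconds for demo I/O");

MODULE_LICENSE("GPL");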
@@ -58,6 +66,7 @@ static DEFINE_SPINLOCK(dev_list_lock);
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
+static wait_queue_head_t nvme_kthread_wait;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
@@ -74,6 +83,7 @@ struct async_cmd_info {
  * commands and one for I/O commands).
  */
 struct nvme_queue {
+	struct rcu_head r_head;
 	struct device *q_dmadev;
 	struct nvme_dev *dev;
 	char irqname[24];	/* nvme4294967295-65535\0 */
@@ -85,6 +95,7 @@ struct nvme_queue {
 	wait_queue_head_t sq_full;
 	wait_queue_t sq_cong_wait;
 	struct bio_list sq_cong;
+	struct list_head iod_bio;
 	u32 __iomem *q_db;
 	u16 q_depth;
 	u16 cq_vector;
@@ -95,6 +106,7 @@ struct nvme_queue {
 	u8 cq_phase;
 	u8 cqe_seen;
 	u8 q_suspended;
+	cpumask_var_t cpu_mask;
 	struct async_cmd_info cmdinfo;
 	unsigned long cmdid_data[];
 };
@@ -118,7 +130,7 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 }
 
-typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
 						struct nvme_completion *);
 
 struct nvme_cmd_info {
@@ -190,7 +202,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
 #define CMD_CTX_FLUSH		(0x318 + CMD_CTX_BASE)
 #define CMD_CTX_ABORT		(0x31C + CMD_CTX_BASE)
 
-static void special_completion(struct nvme_dev *dev, void *ctx,
+static void special_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
 	if (ctx == CMD_CTX_CANCELLED)
@@ -198,26 +210,26 @@ static void special_completion(struct nvme_queue *nvmeq, void *ctx,
 	if (ctx == CMD_CTX_FLUSH)
 		return;
 	if (ctx == CMD_CTX_ABORT) {
-		++dev->abort_limit;
+		++nvmeq->dev->abort_limit;
 		return;
 	}
 	if (ctx == CMD_CTX_COMPLETED) {
-		dev_warn(&dev->pci_dev->dev,
+		dev_warn(nvmeq->q_dmadev,
 				"completed id %d twice on queue %d\n",
 				cqe->command_id, le16_to_cpup(&cqe->sq_id));
 		return;
 	}
 	if (ctx == CMD_CTX_INVALID) {
-		dev_warn(&dev->pci_dev->dev,
+		dev_warn(nvmeq->q_dmadev,
 				"invalid id %d completed on queue %d\n",
 				cqe->command_id, le16_to_cpup(&cqe->sq_id));
 		return;
 	}
-	dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+	dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
 }
 
-static void async_completion(struct nvme_dev *dev, void *ctx,
+static void async_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
 	struct async_cmd_info *cmdinfo = ctx;
@@ -262,14 +274,34 @@ static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
 	return ctx;
 }
 
-struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
+{
+	return rcu_dereference_raw(dev->queues[qid]);
+}
+
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
 {
-	return dev->queues[get_cpu() + 1];
+	unsigned queue_id = get_cpu_var(*dev->io_queue);
+	rcu_read_lock();
+	return rcu_dereference(dev->queues[queue_id]);
 }
 
-void put_nvmeq(struct nvme_queue *nvmeq)
+static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
 {
-	put_cpu();
+	rcu_read_unlock();
+	put_cpu_var(nvmeq->dev->io_queue);
+}
+
+static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
+							__acquires(RCU)
+{
+	rcu_read_lock();
+	return rcu_dereference(dev->queues[q_idx]);
+}
+
+static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
+{
+	rcu_read_unlock();
+}
 
 /**
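[Annotation: the accessors above are the reader side of the RCU conversion: dev->queues[] entries are now published with rcu_assign_pointer() and may only be dereferenced inside an RCU read-side critical section. A minimal sketch of that reader pattern with hypothetical names, assuming a single __rcu pointer rather than an array:]

/* Sketch only: RCU read-side pattern mirrored from get_nvmeq()/put_nvmeq();
 * "demo" names are hypothetical. */
#include <linux/rcupdate.h>

struct demo_queue {
	int qid;
	/* ... */
};

static struct demo_queue __rcu *demo_queue_ptr;

static int demo_read_qid(void)
{
	struct demo_queue *q;
	int qid = -1;

	rcu_read_lock();			/* readers are cheap and never block */
	q = rcu_dereference(demo_queue_ptr);	/* safe only under rcu_read_lock() */
	if (q)
		qid = q->qid;
	rcu_read_unlock();
	return qid;
}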
@@ -284,6 +316,10 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
 	unsigned long flags;
 	u16 tail;
 	spin_lock_irqsave(&nvmeq->q_lock, flags);
+	if (nvmeq->q_suspended) {
+		spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+		return -EBUSY;
+	}
 	tail = nvmeq->sq_tail;
 	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
 	if (++tail == nvmeq->q_depth)
@@ -323,6 +359,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
 		iod->npages = -1;
 		iod->length = nbytes;
 		iod->nents = 0;
+		iod->first_dma = 0ULL;
 		iod->start_time = jiffies;
 	}
@@ -371,19 +408,31 @@ static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
 	part_stat_unlock();
 }
 
-static void bio_completion(struct nvme_dev *dev, void *ctx,
+static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
 	struct nvme_iod *iod = ctx;
 	struct bio *bio = iod->private;
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 
+	if (unlikely(status)) {
+		if (!(status & NVME_SC_DNR ||
+				bio->bi_rw & REQ_FAILFAST_MASK) &&
+				(jiffies - iod->start_time) < IOD_TIMEOUT) {
+			if (!waitqueue_active(&nvmeq->sq_full))
+				add_wait_queue(&nvmeq->sq_full,
+							&nvmeq->sq_cong_wait);
+			list_add_tail(&iod->node, &nvmeq->iod_bio);
+			wake_up(&nvmeq->sq_full);
+			return;
+		}
+	}
 	if (iod->nents) {
-		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+		dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
 			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 		nvme_end_io_acct(bio, iod->start_time);
 	}
-	nvme_free_iod(dev, iod);
+	nvme_free_iod(nvmeq->dev, iod);
 	if (status)
 		bio_endio(bio, -EIO);
 	else
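[Annotation: this hunk is the "retry failed commands with non-fatal errors" change: a failed bio is requeued on iod_bio unless the controller set the Do Not Retry status bit, the bio asked to fail fast, or it has already been pending longer than IOD_TIMEOUT. The decision, restated as a standalone predicate (a sketch assuming the driver's headers, not driver code):]

/* Sketch only: the retry decision from bio_completion() above, factored
 * into a hypothetical helper; assumes NVME_SC_DNR, REQ_FAILFAST_MASK and
 * IOD_TIMEOUT from the driver's context. */
static bool demo_should_retry(u16 status, unsigned long bi_rw,
			      unsigned long start_time)
{
	if (!status)
		return false;			/* success: complete normally */
	if (status & NVME_SC_DNR)
		return false;			/* controller says Do Not Retry */
	if (bi_rw & REQ_FAILFAST_MASK)
		return false;			/* upper layer wants fast failure */
	/* only keep retrying within the larger IOD_TIMEOUT window */
	return time_before(jiffies, start_time + IOD_TIMEOUT);
}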
@@ -391,8 +440,8 @@ static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
 }
 
 /* length is in bytes.  gfp flags indicates whether we may sleep. */
-int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
-			struct nvme_iod *iod, int total_len, gfp_t gfp)
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
+								gfp_t gfp)
 {
 	struct dma_pool *pool;
 	int length = total_len;
@@ -405,7 +454,6 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
 	dma_addr_t prp_dma;
 	int nprps, i;
 
-	cmd->prp1 = cpu_to_le64(dma_addr);
 	length -= (PAGE_SIZE - offset);
 	if (length <= 0)
 		return total_len;
@@ -420,7 +468,7 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
 	}
 
 	if (length <= PAGE_SIZE) {
-		cmd->prp2 = cpu_to_le64(dma_addr);
+		iod->first_dma = dma_addr;
 		return total_len;
 	}
@@ -435,13 +483,12 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
 	prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
 	if (!prp_list) {
-		cmd->prp2 = cpu_to_le64(dma_addr);
+		iod->first_dma = dma_addr;
 		iod->npages = -1;
 		return (total_len - length) + PAGE_SIZE;
 	}
 	list[0] = prp_list;
 	iod->first_dma = prp_dma;
-	cmd->prp2 = cpu_to_le64(prp_dma);
 	i = 0;
 	for (;;) {
 		if (i == PAGE_SIZE / 8) {
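[Annotation: with the cmd argument gone, nvme_setup_prps() now only records the second PRP entry in iod->first_dma and callers copy it into prp2 themselves. As a reminder of the PRP layout (my summary, not from the patch): prp1 addresses the first page of the transfer; if the data ends within the next page, prp2 addresses that page directly, otherwise prp2 points to a list of page addresses. A small worked example of the entry count, as a user-space sketch assuming 4KB pages:]

/* Sketch only: PRP bookkeeping for a page-aligned transfer; illustrative,
 * not driver code. */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096

static int demo_prp_list_entries(int total_len, int page_offset)
{
	int remaining = total_len - (DEMO_PAGE_SIZE - page_offset);

	if (remaining <= 0)
		return 0;		/* prp1 alone covers the transfer */
	if (remaining <= DEMO_PAGE_SIZE)
		return 0;		/* prp2 is used as a plain second page */
	return (remaining + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;
}

int main(void)
{
	/* a 16KB aligned read: prp1 covers page 0, prp2 points at a
	 * 3-entry list for pages 1-3; prints 3 */
	printf("%d\n", demo_prp_list_entries(16384, 0));
	return 0;
}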
@@ -480,10 +527,11 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
 	bio_chain(split, bio);
 
-	if (bio_list_empty(&nvmeq->sq_cong))
+	if (!waitqueue_active(&nvmeq->sq_full))
 		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
 	bio_list_add(&nvmeq->sq_cong, split);
 	bio_list_add(&nvmeq->sq_cong, bio);
+	wake_up(&nvmeq->sq_full);
 
 	return 0;
 }
@@ -536,25 +584,13 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 	return length;
 }
 
-/*
- * We reuse the small pool to allocate the 16-byte range here as it is not
- * worth having a special pool for these or additional cases to handle freeing
- * the iod.
- */
 static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 		struct bio *bio, struct nvme_iod *iod, int cmdid)
 {
-	struct nvme_dsm_range *range;
+	struct nvme_dsm_range *range =
+				(struct nvme_dsm_range *)iod_list(iod)[0];
 	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
 
-	range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
-							&iod->first_dma);
-	if (!range)
-		return -ENOMEM;
-
-	iod_list(iod)[0] = (__le64 *)range;
-	iod->npages = 0;
-
 	range->cattr = cpu_to_le32(0);
 	range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
 	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
@@ -601,44 +637,22 @@ int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
 	return nvme_submit_flush(nvmeq, ns, cmdid);
 }
 
-/*
- * Called with local interrupts disabled and the q_lock held.  May not sleep.
- */
-static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
-								struct bio *bio)
+static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
 {
+	struct bio *bio = iod->private;
+	struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
 	struct nvme_command *cmnd;
-	struct nvme_iod *iod;
-	enum dma_data_direction dma_dir;
-	int cmdid, length, result;
+	int cmdid;
 	u16 control;
 	u32 dsmgmt;
-	int psegs = bio_phys_segments(ns->queue, bio);
-
-	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
-		result = nvme_submit_flush_data(nvmeq, ns);
-		if (result)
-			return result;
-	}
-
-	result = -ENOMEM;
-	iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
-	if (!iod)
-		goto nomem;
-	iod->private = bio;
 
-	result = -EBUSY;
 	cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
 	if (unlikely(cmdid < 0))
-		goto free_iod;
+		return cmdid;
 
-	if (bio->bi_rw & REQ_DISCARD) {
-		result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
-		if (result)
-			goto free_cmdid;
-		return result;
-	}
-	if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+	if (bio->bi_rw & REQ_DISCARD)
+		return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+	if ((bio->bi_rw & REQ_FLUSH) && !iod->nents)
 		return nvme_submit_flush(nvmeq, ns, cmdid);
 
 	control = 0;
@@ -652,42 +666,85 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
 
 	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
 	memset(cmnd, 0, sizeof(*cmnd));
-	if (bio_data_dir(bio)) {
-		cmnd->rw.opcode = nvme_cmd_write;
-		dma_dir = DMA_TO_DEVICE;
-	} else {
-		cmnd->rw.opcode = nvme_cmd_read;
-		dma_dir = DMA_FROM_DEVICE;
-	}
-
-	result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
-	if (result <= 0)
-		goto free_cmdid;
-	length = result;
-
+	cmnd->rw.opcode = bio_data_dir(bio) ? nvme_cmd_write : nvme_cmd_read;
 	cmnd->rw.command_id = cmdid;
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
-	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
-								GFP_ATOMIC);
+	cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
-	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+	cmnd->rw.length =
+		cpu_to_le16((bio->bi_iter.bi_size >> ns->lba_shift) - 1);
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
-	nvme_start_io_acct(bio);
 	if (++nvmeq->sq_tail == nvmeq->q_depth)
 		nvmeq->sq_tail = 0;
 	writel(nvmeq->sq_tail, nvmeq->q_db);
 
 	return 0;
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held.  May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+								struct bio *bio)
+{
+	struct nvme_iod *iod;
+	int psegs = bio_phys_segments(ns->queue, bio);
+	int result;
+
+	if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+		result = nvme_submit_flush_data(nvmeq, ns);
+		if (result)
+			return result;
+	}
+
+	iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
+	if (!iod)
+		return -ENOMEM;
+
+	iod->private = bio;
+	if (bio->bi_rw & REQ_DISCARD) {
+		void *range;
+		/*
+		 * We reuse the small pool to allocate the 16-byte range here
+		 * as it is not worth having a special pool for these or
+		 * additional cases to handle freeing the iod.
+		 */
+		range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
+								GFP_ATOMIC,
+								&iod->first_dma);
+		if (!range) {
+			result = -ENOMEM;
+			goto free_iod;
+		}
+		iod_list(iod)[0] = (__le64 *)range;
+		iod->npages = 0;
+	} else if (psegs) {
+		result = nvme_map_bio(nvmeq, iod, bio,
+			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+			psegs);
+		if (result <= 0)
+			goto free_iod;
+		if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) !=
+								result) {
+			result = -ENOMEM;
+			goto free_iod;
+		}
+		nvme_start_io_acct(bio);
+	}
+	if (unlikely(nvme_submit_iod(nvmeq, iod))) {
+		if (!waitqueue_active(&nvmeq->sq_full))
+			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+		list_add_tail(&iod->node, &nvmeq->iod_bio);
+	}
+	return 0;
 
- free_cmdid:
-	free_cmdid(nvmeq, cmdid, NULL);
  free_iod:
 	nvme_free_iod(nvmeq->dev, iod);
- nomem:
 	return result;
 }
@@ -711,7 +768,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
 		}
 
 		ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
-		fn(nvmeq->dev, ctx, &cqe);
+		fn(nvmeq, ctx, &cqe);
 	}
 
 	/* If the controller ignores the cq head doorbell and continuously
@@ -747,7 +804,7 @@ static void nvme_make_request(struct request_queue *q, struct bio *bio)
 	if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
 		result = nvme_submit_bio_queue(nvmeq, ns, bio);
 	if (unlikely(result)) {
-		if (bio_list_empty(&nvmeq->sq_cong))
+		if (!waitqueue_active(&nvmeq->sq_full))
 			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
 		bio_list_add(&nvmeq->sq_cong, bio);
 	}
@@ -791,7 +848,7 @@ struct sync_cmd_info {
 	int status;
 };
 
-static void sync_completion(struct nvme_dev *dev, void *ctx,
+static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
 						struct nvme_completion *cqe)
 {
 	struct sync_cmd_info *cmdinfo = ctx;
@@ -804,27 +861,46 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
  */
-int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
+						struct nvme_command *cmd,
 						u32 *result, unsigned timeout)
 {
-	int cmdid;
+	int cmdid, ret;
 	struct sync_cmd_info cmdinfo;
+	struct nvme_queue *nvmeq;
+
+	nvmeq = lock_nvmeq(dev, q_idx);
+	if (!nvmeq) {
+		unlock_nvmeq(nvmeq);
+		return -ENODEV;
+	}
 
 	cmdinfo.task = current;
 	cmdinfo.status = -EINTR;
 
-	cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
-								timeout);
-	if (cmdid < 0)
+	cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
+	if (cmdid < 0) {
+		unlock_nvmeq(nvmeq);
 		return cmdid;
+	}
 	cmd->common.command_id = cmdid;
 
 	set_current_state(TASK_KILLABLE);
-	nvme_submit_cmd(nvmeq, cmd);
+	ret = nvme_submit_cmd(nvmeq, cmd);
+	if (ret) {
+		free_cmdid(nvmeq, cmdid, NULL);
+		unlock_nvmeq(nvmeq);
+		set_current_state(TASK_RUNNING);
+		return ret;
+	}
+	unlock_nvmeq(nvmeq);
 	schedule_timeout(timeout);
 
 	if (cmdinfo.status == -EINTR) {
-		nvme_abort_command(nvmeq, cmdid);
+		nvmeq = lock_nvmeq(dev, q_idx);
+		if (nvmeq)
+			nvme_abort_command(nvmeq, cmdid);
+		unlock_nvmeq(nvmeq);
 		return -EINTR;
 	}
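[Annotation: nvme_submit_sync_cmd() now takes a device and queue index so it can relock the (possibly replaced) queue under RCU, and it checks the return of nvme_submit_cmd(), which can fail with -EBUSY on a suspended queue. The underlying sleep/wake handshake, sketched with hypothetical names:]

/* Sketch only: the TASK_KILLABLE submit-and-wait pattern used above. */
#include <linux/sched.h>

struct demo_sync {
	struct task_struct *task;
	int status;
};

static void demo_complete(struct demo_sync *s, int status)
{
	s->status = status;
	wake_up_process(s->task);	/* completion path wakes the submitter */
}

static int demo_submit_and_wait(struct demo_sync *s, unsigned long timeout)
{
	s->task = current;
	s->status = -EINTR;		/* still -EINTR => timed out or killed */
	set_current_state(TASK_KILLABLE);
	/* ... queue the command; on completion demo_complete() runs ... */
	schedule_timeout(timeout);
	return s->status;
}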
@@ -845,20 +921,26 @@ static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
 		return cmdid;
 	cmdinfo->status = -EINTR;
 	cmd->common.command_id = cmdid;
-	nvme_submit_cmd(nvmeq, cmd);
-	return 0;
+	return nvme_submit_cmd(nvmeq, cmd);
 }
 
 int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
 								u32 *result)
 {
-	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+	return nvme_submit_sync_cmd(dev, 0, cmd, result, ADMIN_TIMEOUT);
+}
+
+int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+								u32 *result)
+{
+	return nvme_submit_sync_cmd(dev, smp_processor_id() + 1, cmd, result,
+							NVME_IO_TIMEOUT);
 }
 
 static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
 		struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
 {
-	return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+	return nvme_submit_async_cmd(raw_nvmeq(dev, 0), cmd, cmdinfo,
 							ADMIN_TIMEOUT);
 }
@@ -985,6 +1067,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
 	struct nvme_command cmd;
 	struct nvme_dev *dev = nvmeq->dev;
 	struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+	struct nvme_queue *adminq;
 
 	if (!nvmeq->qid || info[cmdid].aborted) {
 		if (work_busy(&dev->reset_work))
@@ -1001,7 +1084,8 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
 	if (!dev->abort_limit)
 		return;
 
-	a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+	adminq = rcu_dereference(dev->queues[0]);
+	a_cmdid = alloc_cmdid(adminq, CMD_CTX_ABORT, special_completion,
 								ADMIN_TIMEOUT);
 	if (a_cmdid < 0)
 		return;
@@ -1018,7 +1102,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
 	dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
 							nvmeq->qid);
-	nvme_submit_cmd(dev->queues[0], &cmd);
+	nvme_submit_cmd(adminq, &cmd);
 }
 
 /**
@@ -1051,23 +1135,38 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
 		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
 								nvmeq->qid);
 		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
-		fn(nvmeq->dev, ctx, &cqe);
+		fn(nvmeq, ctx, &cqe);
 	}
 }
 
-static void nvme_free_queue(struct nvme_queue *nvmeq)
+static void nvme_free_queue(struct rcu_head *r)
 {
+	struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);
+
 	spin_lock_irq(&nvmeq->q_lock);
 	while (bio_list_peek(&nvmeq->sq_cong)) {
 		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
 		bio_endio(bio, -EIO);
 	}
+	while (!list_empty(&nvmeq->iod_bio)) {
+		static struct nvme_completion cqe = {
+			.status = cpu_to_le16(
+				(NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1),
+		};
+		struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio,
+							struct nvme_iod,
+							node);
+		list_del(&iod->node);
+		bio_completion(nvmeq, iod, &cqe);
+	}
 	spin_unlock_irq(&nvmeq->q_lock);
 
 	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
 	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
 					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+	if (nvmeq->qid)
+		free_cpumask_var(nvmeq->cpu_mask);
 	kfree(nvmeq);
 }
@@ -1076,9 +1175,10 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 	int i;
 
 	for (i = dev->queue_count - 1; i >= lowest; i--) {
-		nvme_free_queue(dev->queues[i]);
+		struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
+		rcu_assign_pointer(dev->queues[i], NULL);
+		call_rcu(&nvmeq->r_head, nvme_free_queue);
 		dev->queue_count--;
-		dev->queues[i] = NULL;
 	}
 }
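[Annotation: this is the writer side matching the RCU readers earlier: the queue pointer is unpublished first, then the memory is reclaimed via call_rcu() only after every in-flight reader has left its critical section (nvme_remove() below also gains an rcu_barrier() so pending callbacks finish before teardown). The generic pattern, sketched with hypothetical names:]

/* Sketch only: unpublish-then-call_rcu teardown mirrored from
 * nvme_free_queues(); "demo" names are hypothetical. */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	struct rcu_head rcu;
	/* ... payload ... */
};

static struct demo_obj __rcu *demo_obj_ptr;

static void demo_obj_free(struct rcu_head *r)
{
	kfree(container_of(r, struct demo_obj, rcu));
}

static void demo_obj_retire(void)
{
	struct demo_obj *obj = rcu_dereference_raw(demo_obj_ptr);

	rcu_assign_pointer(demo_obj_ptr, NULL);	/* new readers see NULL */
	call_rcu(&obj->rcu, demo_obj_free);	/* freed after a grace period */
}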
@@ -1098,6 +1198,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 		return 1;
 	}
 	nvmeq->q_suspended = 1;
+	nvmeq->dev->online_queues--;
 	spin_unlock_irq(&nvmeq->q_lock);
 
 	irq_set_affinity_hint(vector, NULL);
@@ -1116,7 +1217,7 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
 static void nvme_disable_queue(struct nvme_dev *dev, int qid)
 {
-	struct nvme_queue *nvmeq = dev->queues[qid];
+	struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);
 
 	if (!nvmeq)
 		return;
@@ -1152,6 +1253,9 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	if (!nvmeq->sq_cmds)
 		goto free_cqdma;
 
+	if (qid && !zalloc_cpumask_var(&nvmeq->cpu_mask, GFP_KERNEL))
+		goto free_sqdma;
+
 	nvmeq->q_dmadev = dmadev;
 	nvmeq->dev = dev;
 	snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
@@ -1162,15 +1266,20 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	init_waitqueue_head(&nvmeq->sq_full);
 	init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
 	bio_list_init(&nvmeq->sq_cong);
+	INIT_LIST_HEAD(&nvmeq->iod_bio);
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
 	nvmeq->q_depth = depth;
 	nvmeq->cq_vector = vector;
 	nvmeq->qid = qid;
 	nvmeq->q_suspended = 1;
 	dev->queue_count++;
+	rcu_assign_pointer(dev->queues[qid], nvmeq);
 
 	return nvmeq;
 
+ free_sqdma:
+	dma_free_coherent(dmadev, SQ_SIZE(depth), (void *)nvmeq->sq_cmds,
+							nvmeq->sq_dma_addr);
  free_cqdma:
 	dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
 							nvmeq->cq_dma_addr);
@@ -1203,6 +1312,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
 	nvme_cancel_ios(nvmeq, false);
 	nvmeq->q_suspended = 0;
+	dev->online_queues++;
 }
 
 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
@@ -1311,12 +1421,11 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	if (result < 0)
 		return result;
 
-	nvmeq = dev->queues[0];
+	nvmeq = raw_nvmeq(dev, 0);
 	if (!nvmeq) {
 		nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
 		if (!nvmeq)
 			return -ENOMEM;
-		dev->queues[0] = nvmeq;
 	}
 
 	aqa = nvmeq->q_depth - 1;
@@ -1418,7 +1527,6 @@ void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
 	struct nvme_dev *dev = ns->dev;
-	struct nvme_queue *nvmeq;
 	struct nvme_user_io io;
 	struct nvme_command c;
 	unsigned length, meta_len;
@@ -1492,22 +1600,14 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 		c.rw.metadata = cpu_to_le64(meta_dma_addr);
 	}
 
-	length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+	length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+	c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	c.rw.prp2 = cpu_to_le64(iod->first_dma);
 
-	nvmeq = get_nvmeq(dev);
-	/*
-	 * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
-	 * disabled.  We may be preempted at any point, and be rescheduled
-	 * to a different CPU.  That will cause cacheline bouncing, but no
-	 * additional races since q_lock already protects against other CPUs.
-	 */
-	put_nvmeq(nvmeq);
 	if (length != (io.nblocks + 1) << ns->lba_shift)
 		status = -ENOMEM;
-	else if (!nvmeq || nvmeq->q_suspended)
-		status = -EBUSY;
 	else
-		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+		status = nvme_submit_io_cmd(dev, &c, NULL);
 
 	if (meta_len) {
 		if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
@@ -1572,8 +1672,9 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
 								length);
 		if (IS_ERR(iod))
 			return PTR_ERR(iod);
-		length = nvme_setup_prps(dev, &c.common, iod, length,
-								GFP_KERNEL);
+		length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+		c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		c.common.prp2 = cpu_to_le64(iod->first_dma);
 	}
 
 	timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
@@ -1581,8 +1682,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
 	if (length != cmd.data_len)
 		status = -ENOMEM;
 	else
-		status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
-								timeout);
+		status = nvme_submit_sync_cmd(dev, 0, &c, &cmd.result, timeout);
 
 	if (cmd.data_len) {
 		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1653,25 +1753,51 @@ static void nvme_release(struct gendisk *disk, fmode_t mode)
 	kref_put(&dev->kref, nvme_free_dev);
 }
 
+static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
+{
+	/* some standard values */
+	geo->heads = 1 << 6;
+	geo->sectors = 1 << 5;
+	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+	return 0;
+}
+
 static const struct block_device_operations nvme_fops = {
 	.owner		= THIS_MODULE,
 	.ioctl		= nvme_ioctl,
 	.compat_ioctl	= nvme_compat_ioctl,
 	.open		= nvme_open,
 	.release	= nvme_release,
+	.getgeo		= nvme_getgeo,
 };
 
+static void nvme_resubmit_iods(struct nvme_queue *nvmeq)
+{
+	struct nvme_iod *iod, *next;
+
+	list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) {
+		if (unlikely(nvme_submit_iod(nvmeq, iod)))
+			break;
+		list_del(&iod->node);
+		if (bio_list_empty(&nvmeq->sq_cong) &&
+				list_empty(&nvmeq->iod_bio))
+			remove_wait_queue(&nvmeq->sq_full,
+						&nvmeq->sq_cong_wait);
+	}
+}
+
 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 {
 	while (bio_list_peek(&nvmeq->sq_cong)) {
 		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
 		struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
 
-		if (bio_list_empty(&nvmeq->sq_cong))
+		if (bio_list_empty(&nvmeq->sq_cong) &&
+				list_empty(&nvmeq->iod_bio))
 			remove_wait_queue(&nvmeq->sq_full,
 							&nvmeq->sq_cong_wait);
 		if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
-			if (bio_list_empty(&nvmeq->sq_cong))
+			if (!waitqueue_active(&nvmeq->sq_full))
 				add_wait_queue(&nvmeq->sq_full,
 							&nvmeq->sq_cong_wait);
 			bio_list_add_head(&nvmeq->sq_cong, bio);
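[Annotation: nvme_getgeo() above backs the new .getgeo hook: with 64 heads and 32 sectors per track, one cylinder is 64 * 32 = 2048 = 1 << 11 sectors, so cylinders is the capacity shifted right by 11. That makes HDIO_GETGEO work for partitioning tools that still want a geometry. A hypothetical user-space check; the device path is an assumption:]

/* Sketch only: querying the synthetic geometry via the standard
 * HDIO_GETGEO ioctl from user space. */
#include <fcntl.h>
#include <linux/hdreg.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* hypothetical device */

	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0)
		return 1;
	/* heads * sectors = 64 * 32 = 2048 sectors per cylinder */
	printf("heads=%u sectors=%u cylinders=%u start=%lu\n",
	       geo.heads, geo.sectors, geo.cylinders, geo.start);
	close(fd);
	return 0;
}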
@@ -1700,8 +1826,10 @@ static int nvme_kthread(void *data)
 				queue_work(nvme_workq, &dev->reset_work);
 				continue;
 			}
+			rcu_read_lock();
 			for (i = 0; i < dev->queue_count; i++) {
-				struct nvme_queue *nvmeq = dev->queues[i];
+				struct nvme_queue *nvmeq =
+						rcu_dereference(dev->queues[i]);
 				if (!nvmeq)
 					continue;
 				spin_lock_irq(&nvmeq->q_lock);
@@ -1710,9 +1838,11 @@ static int nvme_kthread(void *data)
 				nvme_process_cq(nvmeq);
 				nvme_cancel_ios(nvmeq, true);
 				nvme_resubmit_bios(nvmeq);
+				nvme_resubmit_iods(nvmeq);
 unlock:
 				spin_unlock_irq(&nvmeq->q_lock);
 			}
+			rcu_read_unlock();
 		}
 		spin_unlock(&dev_list_lock);
 		schedule_timeout(round_jiffies_relative(HZ));
@@ -1787,6 +1917,143 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 	return NULL;
 }
 
+static int nvme_find_closest_node(int node)
+{
+	int n, val, min_val = INT_MAX, best_node = node;
+
+	for_each_online_node(n) {
+		if (n == node)
+			continue;
+		val = node_distance(node, n);
+		if (val < min_val) {
+			min_val = val;
+			best_node = n;
+		}
+	}
+	return best_node;
+}
+
+static void nvme_set_queue_cpus(cpumask_t *qmask, struct nvme_queue *nvmeq,
+								int count)
+{
+	int cpu;
+
+	for_each_cpu(cpu, qmask) {
+		if (cpumask_weight(nvmeq->cpu_mask) >= count)
+			break;
+		if (!cpumask_test_and_set_cpu(cpu, nvmeq->cpu_mask))
+			*per_cpu_ptr(nvmeq->dev->io_queue, cpu) = nvmeq->qid;
+	}
+}
+
+static void nvme_add_cpus(cpumask_t *mask, const cpumask_t *unassigned_cpus,
+	const cpumask_t *new_mask, struct nvme_queue *nvmeq, int cpus_per_queue)
+{
+	int next_cpu;
+
+	for_each_cpu(next_cpu, new_mask) {
+		cpumask_or(mask, mask, get_cpu_mask(next_cpu));
+		cpumask_or(mask, mask, topology_thread_cpumask(next_cpu));
+		cpumask_and(mask, mask, unassigned_cpus);
+		nvme_set_queue_cpus(mask, nvmeq, cpus_per_queue);
+	}
+}
+
+static void nvme_create_io_queues(struct nvme_dev *dev)
+{
+	unsigned i, max;
+
+	max = min(dev->max_qid, num_online_cpus());
+	for (i = dev->queue_count; i <= max; i++)
+		if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+			break;
+
+	max = min(dev->queue_count - 1, num_online_cpus());
+	for (i = dev->online_queues; i <= max; i++)
+		if (nvme_create_queue(raw_nvmeq(dev, i), i))
+			break;
+}
+
+/*
+ * If there are fewer queues than online cpus, this will try to optimally
+ * assign a queue to multiple cpus by grouping cpus that are "close" together:
+ * thread siblings, core, socket, closest node, then whatever else is
+ * available.
+ */
+static void nvme_assign_io_queues(struct nvme_dev *dev)
+{
+	unsigned cpu, cpus_per_queue, queues, remainder, i;
+	cpumask_var_t unassigned_cpus;
+
+	nvme_create_io_queues(dev);
+
+	queues = min(dev->online_queues - 1, num_online_cpus());
+	if (!queues)
+		return;
+
+	cpus_per_queue = num_online_cpus() / queues;
+	remainder = queues - (num_online_cpus() - queues * cpus_per_queue);
+
+	if (!alloc_cpumask_var(&unassigned_cpus, GFP_KERNEL))
+		return;
+
+	cpumask_copy(unassigned_cpus, cpu_online_mask);
+	cpu = cpumask_first(unassigned_cpus);
+	for (i = 1; i <= queues; i++) {
+		struct nvme_queue *nvmeq = lock_nvmeq(dev, i);
+		cpumask_t mask;
+
+		cpumask_clear(nvmeq->cpu_mask);
+		if (!cpumask_weight(unassigned_cpus)) {
+			unlock_nvmeq(nvmeq);
+			break;
+		}
+
+		mask = *get_cpu_mask(cpu);
+		nvme_set_queue_cpus(&mask, nvmeq, cpus_per_queue);
+		if (cpus_weight(mask) < cpus_per_queue)
+			nvme_add_cpus(&mask, unassigned_cpus,
+				topology_thread_cpumask(cpu),
+				nvmeq, cpus_per_queue);
+		if (cpus_weight(mask) < cpus_per_queue)
+			nvme_add_cpus(&mask, unassigned_cpus,
+				topology_core_cpumask(cpu),
+				nvmeq, cpus_per_queue);
+		if (cpus_weight(mask) < cpus_per_queue)
+			nvme_add_cpus(&mask, unassigned_cpus,
+				cpumask_of_node(cpu_to_node(cpu)),
+				nvmeq, cpus_per_queue);
+		if (cpus_weight(mask) < cpus_per_queue)
+			nvme_add_cpus(&mask, unassigned_cpus,
+				cpumask_of_node(
+					nvme_find_closest_node(
+						cpu_to_node(cpu))),
+				nvmeq, cpus_per_queue);
+		if (cpus_weight(mask) < cpus_per_queue)
+			nvme_add_cpus(&mask, unassigned_cpus,
+				unassigned_cpus,
+				nvmeq, cpus_per_queue);
+
+		WARN(cpumask_weight(nvmeq->cpu_mask) != cpus_per_queue,
+			"nvme%d qid:%d mis-matched queue-to-cpu assignment\n",
+			dev->instance, i);
+
+		irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+							nvmeq->cpu_mask);
+		cpumask_andnot(unassigned_cpus, unassigned_cpus,
+						nvmeq->cpu_mask);
+		cpu = cpumask_next(cpu, unassigned_cpus);
+		if (remainder && !--remainder)
+			cpus_per_queue++;
+		unlock_nvmeq(nvmeq);
+	}
+	WARN(cpumask_weight(unassigned_cpus), "nvme%d unassigned online cpus\n",
+								dev->instance);
+	i = 0;
+	cpumask_andnot(unassigned_cpus, cpu_possible_mask, cpu_online_mask);
+	for_each_cpu(cpu, unassigned_cpus)
+		*per_cpu_ptr(dev->io_queue, cpu) = (i++ % queues) + 1;
+	free_cpumask_var(unassigned_cpus);
+}
+
 static int set_queue_count(struct nvme_dev *dev, int count)
 {
 	int status;
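[Annotation: the block above is the heart of the per-cpu I/O queue feature: every possible CPU gets an entry in the per-cpu io_queue variable naming the queue it should submit on, and nvme_assign_io_queues() groups CPUs by thread sibling, core, node, and finally distance when queues are scarce. The per-cpu lookup itself is a standard pattern; a minimal sketch with hypothetical names, error handling trimmed:]

/* Sketch only: per-cpu queue-index mapping as consumed by get_nvmeq(). */
#include <linux/errno.h>
#include <linux/percpu.h>

static unsigned short __percpu *demo_io_queue;

static int demo_init(void)
{
	int cpu;

	demo_io_queue = alloc_percpu(unsigned short);
	if (!demo_io_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(demo_io_queue, cpu) = 1;	/* default to queue 1 */
	return 0;
}

static unsigned short demo_pick_queue(void)
{
	/* get_cpu_var() disables preemption so the CPU cannot change
	 * while we read this CPU's slot */
	unsigned short qid = get_cpu_var(*demo_io_queue);

	put_cpu_var(*demo_io_queue);
	return qid;
}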
@@ -1805,13 +2072,26 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
 	return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
 }
 
+static int nvme_cpu_notify(struct notifier_block *self,
+				unsigned long action, void *hcpu)
+{
+	struct nvme_dev *dev = container_of(self, struct nvme_dev, nb);
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_DEAD:
+		nvme_assign_io_queues(dev);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	struct nvme_queue *adminq = dev->queues[0];
+	struct nvme_queue *adminq = raw_nvmeq(dev, 0);
 	struct pci_dev *pdev = dev->pci_dev;
-	int result, cpu, i, vecs, nr_io_queues, size, q_depth;
+	int result, i, vecs, nr_io_queues, size;
 
-	nr_io_queues = num_online_cpus();
+	nr_io_queues = num_possible_cpus();
 	result = set_queue_count(dev, nr_io_queues);
 	if (result < 0)
 		return result;
@@ -1830,7 +2110,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 			size = db_bar_size(dev, nr_io_queues);
 		} while (1);
 		dev->dbs = ((void __iomem *)dev->bar) + 4096;
-		dev->queues[0]->q_db = dev->dbs;
+		adminq->q_db = dev->dbs;
 	}
 
 	/* Deregister the admin queue's interrupt */
@@ -1856,6 +2136,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	 * number of interrupts.
 	 */
 	nr_io_queues = vecs;
+	dev->max_qid = nr_io_queues;
 
 	result = queue_request_irq(dev, adminq, adminq->irqname);
 	if (result) {
@@ -1864,49 +2145,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	}
 
 	/* Free previously allocated queues that are no longer usable */
-	spin_lock(&dev_list_lock);
-	for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
-		struct nvme_queue *nvmeq = dev->queues[i];
-
-		spin_lock_irq(&nvmeq->q_lock);
-		nvme_cancel_ios(nvmeq, false);
-		spin_unlock_irq(&nvmeq->q_lock);
-
-		nvme_free_queue(nvmeq);
-		dev->queue_count--;
-		dev->queues[i] = NULL;
-	}
-	spin_unlock(&dev_list_lock);
-
-	cpu = cpumask_first(cpu_online_mask);
-	for (i = 0; i < nr_io_queues; i++) {
-		irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
-		cpu = cpumask_next(cpu, cpu_online_mask);
-	}
-
-	q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
-								NVME_Q_DEPTH);
-	for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
-		dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
-		if (!dev->queues[i + 1]) {
-			result = -ENOMEM;
-			goto free_queues;
-		}
-	}
-
-	for (; i < num_possible_cpus(); i++) {
-		int target = i % rounddown_pow_of_two(dev->queue_count - 1);
-		dev->queues[i + 1] = dev->queues[target + 1];
-	}
-
-	for (i = 1; i < dev->queue_count; i++) {
-		result = nvme_create_queue(dev->queues[i], i);
-		if (result) {
-			for (--i; i > 0; i--)
-				nvme_disable_queue(dev, i);
-			goto free_queues;
-		}
-	}
+	nvme_free_queues(dev, nr_io_queues + 1);
+	nvme_assign_io_queues(dev);
+
+	dev->nb.notifier_call = &nvme_cpu_notify;
+	result = register_hotcpu_notifier(&dev->nb);
+	if (result)
+		goto free_queues;
 
 	return 0;
@@ -1985,6 +2230,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 
 static int nvme_dev_map(struct nvme_dev *dev)
 {
+	u64 cap;
 	int bars, result = -ENOMEM;
 	struct pci_dev *pdev = dev->pci_dev;
 
@@ -2008,7 +2254,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
 		result = -ENODEV;
 		goto unmap;
 	}
-	dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
+	cap = readq(&dev->bar->cap);
+	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
+	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
 	dev->dbs = ((void __iomem *)dev->bar) + 4096;
 
 	return 0;
@@ -2164,7 +2412,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
 	atomic_set(&dq.refcount, 0);
 	dq.worker = &worker;
 	for (i = dev->queue_count - 1; i > 0; i--) {
-		struct nvme_queue *nvmeq = dev->queues[i];
+		struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
 
 		if (nvme_suspend_queue(nvmeq))
 			continue;
@@ -2177,19 +2425,38 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
 	kthread_stop(kworker_task);
 }
 
+/*
+ * Remove the node from the device list and check
+ * for whether or not we need to stop the nvme_thread.
+ */
+static void nvme_dev_list_remove(struct nvme_dev *dev)
+{
+	struct task_struct *tmp = NULL;
+
+	spin_lock(&dev_list_lock);
+	list_del_init(&dev->node);
+	if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
+		tmp = nvme_thread;
+		nvme_thread = NULL;
+	}
+	spin_unlock(&dev_list_lock);
+
+	if (tmp)
+		kthread_stop(tmp);
+}
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
 	int i;
 
 	dev->initialized = 0;
+	unregister_hotcpu_notifier(&dev->nb);
 
-	spin_lock(&dev_list_lock);
-	list_del_init(&dev->node);
-	spin_unlock(&dev_list_lock);
+	nvme_dev_list_remove(dev);
 
 	if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
 		for (i = dev->queue_count - 1; i >= 0; i--) {
-			struct nvme_queue *nvmeq = dev->queues[i];
+			struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
 			nvme_suspend_queue(nvmeq);
 			nvme_clear_queue(nvmeq);
 		}
@@ -2282,6 +2549,7 @@ static void nvme_free_dev(struct kref *kref)
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 
 	nvme_free_namespaces(dev);
+	free_percpu(dev->io_queue);
 	kfree(dev->queues);
 	kfree(dev->entry);
 	kfree(dev);
@@ -2325,6 +2593,7 @@ static const struct file_operations nvme_dev_fops = {
 static int nvme_dev_start(struct nvme_dev *dev)
 {
 	int result;
+	bool start_thread = false;
 
 	result = nvme_dev_map(dev);
 	if (result)
@@ -2335,9 +2604,24 @@ static int nvme_dev_start(struct nvme_dev *dev)
 		goto unmap;
 
 	spin_lock(&dev_list_lock);
+	if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+		start_thread = true;
+		nvme_thread = NULL;
+	}
 	list_add(&dev->node, &dev_list);
 	spin_unlock(&dev_list_lock);
 
+	if (start_thread) {
+		nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+		wake_up(&nvme_kthread_wait);
+	} else
+		wait_event_killable(nvme_kthread_wait, nvme_thread);
+
+	if (IS_ERR_OR_NULL(nvme_thread)) {
+		result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+		goto disable;
+	}
+
 	result = nvme_setup_io_queues(dev);
 	if (result && result != -EBUSY)
 		goto disable;
@@ -2346,9 +2630,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
 disable:
 	nvme_disable_queue(dev, 0);
-
-	spin_lock(&dev_list_lock);
-	list_del_init(&dev->node);
-	spin_unlock(&dev_list_lock);
+	nvme_dev_list_remove(dev);
 unmap:
 	nvme_dev_unmap(dev);
 	return result;
@@ -2367,18 +2649,10 @@ static int nvme_remove_dead_ctrl(void *arg)
 static void nvme_remove_disks(struct work_struct *ws)
 {
-	int i;
 	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
 
 	nvme_dev_remove(dev);
-	spin_lock(&dev_list_lock);
-	for (i = dev->queue_count - 1; i > 0; i--) {
-		BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
-		nvme_free_queue(dev->queues[i]);
-		dev->queue_count--;
-		dev->queues[i] = NULL;
-	}
-	spin_unlock(&dev_list_lock);
+	nvme_free_queues(dev, 1);
 }
 
 static int nvme_dev_resume(struct nvme_dev *dev)
@@ -2441,6 +2715,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 							GFP_KERNEL);
 	if (!dev->queues)
 		goto free;
+	dev->io_queue = alloc_percpu(unsigned short);
+	if (!dev->io_queue)
+		goto free;
 
 	INIT_LIST_HEAD(&dev->namespaces);
 	dev->reset_workfn = nvme_reset_failed_dev;
@@ -2455,6 +2732,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto release;
 
+	kref_init(&dev->kref);
 	result = nvme_dev_start(dev);
 	if (result) {
 		if (result == -EBUSY)
@@ -2462,7 +2740,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto release_pools;
 	}
 
-	kref_init(&dev->kref);
 	result = nvme_dev_add(dev);
 	if (result)
 		goto shutdown;
@@ -2491,6 +2768,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 release:
 	nvme_release_instance(dev);
 free:
+	free_percpu(dev->io_queue);
 	kfree(dev->queues);
 	kfree(dev->entry);
 	kfree(dev);
@@ -2517,6 +2795,7 @@ static void nvme_remove(struct pci_dev *pdev)
 	nvme_dev_remove(dev);
 	nvme_dev_shutdown(dev);
 	nvme_free_queues(dev, 0);
+	rcu_barrier();
 	nvme_release_instance(dev);
 	nvme_release_prp_pools(dev);
 	kref_put(&dev->kref, nvme_free_dev);
@@ -2529,6 +2808,7 @@ static void nvme_remove(struct pci_dev *pdev)
 #define nvme_slot_reset NULL
 #define nvme_error_resume NULL
 
+#ifdef CONFIG_PM_SLEEP
 static int nvme_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -2549,6 +2829,7 @@ static int nvme_resume(struct device *dev)
 	}
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@ -2563,7 +2844,7 @@ static const struct pci_error_handlers nvme_err_handler = {
 /* Move to pci_ids.h later */
 #define PCI_CLASS_STORAGE_EXPRESS	0x010802
 
-static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }
 };
@@ -2585,14 +2866,11 @@ static int __init nvme_init(void)
 {
 	int result;
 
-	nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-	if (IS_ERR(nvme_thread))
-		return PTR_ERR(nvme_thread);
+	init_waitqueue_head(&nvme_kthread_wait);
 
-	result = -ENOMEM;
 	nvme_workq = create_singlethread_workqueue("nvme");
 	if (!nvme_workq)
-		goto kill_kthread;
+		return -ENOMEM;
 
 	result = register_blkdev(nvme_major, "nvme");
 	if (result < 0)
@@ -2609,8 +2887,6 @@ static int __init nvme_init(void)
 	unregister_blkdev(nvme_major, "nvme");
 kill_workq:
 	destroy_workqueue(nvme_workq);
- kill_kthread:
-	kthread_stop(nvme_thread);
 	return result;
 }
@@ -2619,11 +2895,11 @@ static void __exit nvme_exit(void)
 	pci_unregister_driver(&nvme_driver);
 	unregister_blkdev(nvme_major, "nvme");
 	destroy_workqueue(nvme_workq);
-	kthread_stop(nvme_thread);
+	BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 }
 
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
 MODULE_LICENSE("GPL");
-MODULE_VERSION("0.8");
+MODULE_VERSION("0.9");
 module_init(nvme_init);
 module_exit(nvme_exit);
@@ -1562,13 +1562,14 @@ static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 			res = PTR_ERR(iod);
 			goto out;
 		}
-		length = nvme_setup_prps(dev, &c.common, iod, tot_len,
-								GFP_KERNEL);
+		length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
 		if (length != tot_len) {
 			res = -ENOMEM;
 			goto out_unmap;
 		}
+		c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
 		c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
 		c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
 	} else if (opcode == nvme_admin_activate_fw) {
@@ -2033,7 +2034,6 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
 	struct nvme_dev *dev = ns->dev;
-	struct nvme_queue *nvmeq;
 	u32 num_cmds;
 	struct nvme_iod *iod;
 	u64 unit_len;
@@ -2045,7 +2045,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	struct nvme_command c;
 	u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
 	u16 control;
-	u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
+	u32 max_blocks = queue_max_hw_sectors(ns->queue);
 	num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
@@ -2093,8 +2093,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 			res = PTR_ERR(iod);
 			goto out;
 		}
-		retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
-								GFP_KERNEL);
+		retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
 		if (retcode != unit_len) {
 			nvme_unmap_user_pages(dev,
 				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2103,21 +2102,12 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 			res = -ENOMEM;
 			goto out;
 		}
+		c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		c.rw.prp2 = cpu_to_le64(iod->first_dma);
 		nvme_offset += unit_num_blocks;
-		nvmeq = get_nvmeq(dev);
-		/*
-		 * Since nvme_submit_sync_cmd sleeps, we can't keep
-		 * preemption disabled. We may be preempted at any
-		 * point, and be rescheduled to a different CPU. That
-		 * will cause cacheline bouncing, but no additional
-		 * races since q_lock already protects against other
-		 * CPUs.
-		 */
-		put_nvmeq(nvmeq);
-		nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
-						NVME_IO_TIMEOUT);
+		nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
 		if (nvme_sc != NVME_SC_SUCCESS) {
 			nvme_unmap_user_pages(dev,
 				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2644,7 +2634,6 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
-	struct nvme_queue *nvmeq;
 	struct nvme_command c;
 	u8 immed, pcmod, pc, no_flush, start;
@@ -2671,10 +2660,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		c.common.opcode = nvme_cmd_flush;
 		c.common.nsid = cpu_to_le32(ns->ns_id);
-		nvmeq = get_nvmeq(ns->dev);
-		put_nvmeq(nvmeq);
-		nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+		nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
 		res = nvme_trans_status_code(hdr, nvme_sc);
 		if (res)
 			goto out;
@@ -2697,15 +2683,12 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
...@@ -2697,15 +2683,12 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns, ...@@ -2697,15 +2683,12 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
int res = SNTI_TRANSLATION_SUCCESS; int res = SNTI_TRANSLATION_SUCCESS;
int nvme_sc; int nvme_sc;
struct nvme_command c; struct nvme_command c;
struct nvme_queue *nvmeq;
memset(&c, 0, sizeof(c)); memset(&c, 0, sizeof(c));
c.common.opcode = nvme_cmd_flush; c.common.opcode = nvme_cmd_flush;
c.common.nsid = cpu_to_le32(ns->ns_id); c.common.nsid = cpu_to_le32(ns->ns_id);
nvmeq = get_nvmeq(ns->dev); nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
put_nvmeq(nvmeq);
nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
res = nvme_trans_status_code(hdr, nvme_sc); res = nvme_trans_status_code(hdr, nvme_sc);
if (res) if (res)
...@@ -2872,7 +2855,6 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr, ...@@ -2872,7 +2855,6 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	struct nvme_dev *dev = ns->dev;
 	struct scsi_unmap_parm_list *plist;
 	struct nvme_dsm_range *range;
-	struct nvme_queue *nvmeq;
 	struct nvme_command c;
 	int i, nvme_sc, res = -ENOMEM;
 	u16 ndesc, list_len;
@@ -2914,10 +2896,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	c.dsm.nr = cpu_to_le32(ndesc - 1);
 	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
-	nvmeq = get_nvmeq(dev);
-	put_nvmeq(nvmeq);
-	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+	nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
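The SCSI-translation hunks above all apply the same transformation, condensed here for reference. This is only a restatement of the change already shown in the diff, not additional driver code; nvmeq, ns, c and nvme_sc are the locals of the translation helpers above.

	/* Before: each caller picked an I/O queue itself and issued a sync command. */
	nvmeq = get_nvmeq(ns->dev);
	put_nvmeq(nvmeq);
	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);

	/* After: nvme_submit_io_cmd() hides the per-CPU, RCU-protected queue
	 * lookup inside the core driver, so the translation layer never
	 * touches struct nvme_queue at all.
	 */
	nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);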
@@ -66,20 +66,25 @@ enum {
 #define NVME_VS(major, minor)	(major << 16 | minor)
-#define NVME_IO_TIMEOUT	(5 * HZ)
+extern unsigned char io_timeout;
+#define NVME_IO_TIMEOUT	(io_timeout * HZ)
 /*
  * Represents an NVM Express device. Each nvme_dev is a PCI function.
  */
 struct nvme_dev {
 	struct list_head node;
-	struct nvme_queue **queues;
+	struct nvme_queue __rcu **queues;
+	unsigned short __percpu *io_queue;
 	u32 __iomem *dbs;
 	struct pci_dev *pci_dev;
 	struct dma_pool *prp_page_pool;
 	struct dma_pool *prp_small_pool;
 	int instance;
-	int queue_count;
+	unsigned queue_count;
+	unsigned online_queues;
+	unsigned max_qid;
+	int q_depth;
 	u32 db_stride;
 	u32 ctrl_config;
 	struct msix_entry *entry;
@@ -89,6 +94,7 @@ struct nvme_dev {
...@@ -89,6 +94,7 @@ struct nvme_dev { ...@@ -89,6 +94,7 @@ struct nvme_dev {
struct miscdevice miscdev; struct miscdevice miscdev;
work_func_t reset_workfn; work_func_t reset_workfn;
struct work_struct reset_work; struct work_struct reset_work;
struct notifier_block nb;
char name[12]; char name[12];
char serial[20]; char serial[20];
char model[40]; char model[40];
 	int length;		/* Of data, in bytes */
 	unsigned long start_time;
 	dma_addr_t first_dma;
+	struct list_head node;
 	struct scatterlist sg[0];
 };
@@ -146,16 +153,12 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
  */
 void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
-int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
-			struct nvme_iod *iod, int total_len, gfp_t gfp);
+int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t);
 struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 				unsigned long addr, unsigned length);
 void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 			struct nvme_iod *iod);
-struct nvme_queue *get_nvmeq(struct nvme_dev *dev);
-void put_nvmeq(struct nvme_queue *nvmeq);
-int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
-						u32 *result, unsigned timeout);
+int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *);
 int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
 int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
 							u32 *result);
@@ -434,6 +434,7 @@ enum {
 	NVME_SC_REFTAG_CHECK		= 0x284,
 	NVME_SC_COMPARE_FAILED		= 0x285,
 	NVME_SC_ACCESS_DENIED		= 0x286,
+	NVME_SC_DNR			= 0x4000,
 };
 struct nvme_completion {
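NVME_SC_DNR corresponds to the spec's Do Not Retry bit as it appears after the driver shifts the phase tag out of the completion's 16-bit status field, which is what lets a failed command be classified as retryable or not. Below is a minimal sketch of such a check; nvme_status_retryable() is a hypothetical helper, not part of this series, and it assumes status already has the phase tag stripped.

static inline bool nvme_status_retryable(u16 status)
{
	/* Retry candidates: any error status without the Do Not Retry bit set. */
	return status != NVME_SC_SUCCESS && !(status & NVME_SC_DNR);
}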