Commit 3e1e21c7 authored by Linus Torvalds

Merge branch 'for-4.5/nvme' of git://git.kernel.dk/linux-block

Pull NVMe updates from Jens Axboe:
 "Last branch for this series is the nvme changes.  It's in a separate
  branch to avoid splitting too much between core and NVMe changes,
  since NVMe is still helping drive some blk-mq changes.  That said, not
  a huge amount of core changes in here.  The grunt of the work is the
  continued split of the code"

* 'for-4.5/nvme' of git://git.kernel.dk/linux-block: (67 commits)
  uapi: update install list after nvme.h rename
  NVMe: Export NVMe attributes to sysfs group
  NVMe: Shutdown controller only for power-off
  NVMe: IO queue deletion re-write
  NVMe: Remove queue freezing on resets
  NVMe: Use a retryable error code on reset
  NVMe: Fix admin queue ring wrap
  nvme: make SG_IO support optional
  nvme: fixes for NVME_IOCTL_IO_CMD on the char device
  nvme: synchronize access to ctrl->namespaces
  nvme: Move nvme_freeze/unfreeze_queues to nvme core
  PCI/AER: include header file
  NVMe: Export namespace attributes to sysfs
  NVMe: Add pci error handlers
  block: remove REQ_NO_TIMEOUT flag
  nvme: merge iod and cmd_info
  nvme: meta_sg doesn't have to be an array
  nvme: properly free resources for cancelled command
  nvme: simplify completion handling
  nvme: special case AEN requests
  ...
parents 0a13daed a9cf8284
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -66,7 +66,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
 	}
 	if (unlikely(!bip))
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	memset(bip, 0, sizeof(*bip));
@@ -89,7 +89,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
 	return bip;
 err:
 	mempool_free(bip, bs->bio_integrity_pool);
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(bio_integrity_alloc);
@@ -298,10 +298,10 @@ int bio_integrity_prep(struct bio *bio)
 
 	/* Allocate bio integrity payload and integrity vectors */
 	bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
-	if (unlikely(bip == NULL)) {
+	if (IS_ERR(bip)) {
 		printk(KERN_ERR "could not allocate data integrity bioset\n");
 		kfree(buf);
-		return -EIO;
+		return PTR_ERR(bip);
 	}
 
 	bip->bip_flags |= BIP_BLOCK_INTEGRITY;
@@ -465,9 +465,8 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
 	BUG_ON(bip_src == NULL);
 
 	bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
-
-	if (bip == NULL)
-		return -EIO;
+	if (IS_ERR(bip))
+		return PTR_ERR(bip);
 
 	memcpy(bip->bip_vec, bip_src->bip_vec,
 	       bip_src->bip_vcnt * sizeof(struct bio_vec));
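With bio_integrity_alloc() now returning ERR_PTR() values instead of NULL, every caller switches from a NULL check to IS_ERR()/PTR_ERR() and can propagate the real error instead of a blanket -EIO. A minimal sketch of the new calling convention (the function name here is illustrative, not part of this series):

	static int example_attach_integrity(struct bio *bio, unsigned int nr_vecs)
	{
		struct bio_integrity_payload *bip;

		bip = bio_integrity_alloc(bio, GFP_NOIO, nr_vecs);
		if (IS_ERR(bip))
			return PTR_ERR(bip);	/* -ENOMEM here; -EINVAL from the
						 * !CONFIG_BLK_DEV_INTEGRITY stub */
		/* ... populate bip ... */
		return 0;
	}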
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -680,6 +680,13 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
+static void blk_rq_timed_out_timer(unsigned long data)
+{
+	struct request_queue *q = (struct request_queue *)data;
+
+	kblockd_schedule_work(&q->timeout_work);
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	struct request_queue *q;
@@ -841,6 +848,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
 		goto fail;
 
+	INIT_WORK(&q->timeout_work, blk_timeout_work);
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->unprep_rq_fn		= NULL;
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -603,8 +603,6 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 		blk_mq_complete_request(rq, -EIO);
 		return;
 	}
-	if (rq->cmd_flags & REQ_NO_TIMEOUT)
-		return;
 
 	if (time_after_eq(jiffies, rq->deadline)) {
 		if (!blk_mark_rq_complete(rq))
@@ -615,15 +613,19 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 	}
 }
 
-static void blk_mq_rq_timer(unsigned long priv)
+static void blk_mq_timeout_work(struct work_struct *work)
 {
-	struct request_queue *q = (struct request_queue *)priv;
+	struct request_queue *q =
+		container_of(work, struct request_queue, timeout_work);
 	struct blk_mq_timeout_data data = {
 		.next		= 0,
 		.next_set	= 0,
 	};
 	int i;
 
+	if (blk_queue_enter(q, true))
+		return;
+
 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
 
 	if (data.next_set) {
@@ -638,6 +640,7 @@ static void blk_mq_rq_timer(unsigned long priv)
 			blk_mq_tag_idle(hctx);
 		}
 	}
+	blk_queue_exit(q);
 }
 
 /*
@@ -2008,7 +2011,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 		hctxs[i]->queue_num = i;
 	}
 
-	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
 	q->nr_queues = nr_cpu_ids;
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -127,13 +127,16 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout
 	}
 }
 
-void blk_rq_timed_out_timer(unsigned long data)
+void blk_timeout_work(struct work_struct *work)
 {
-	struct request_queue *q = (struct request_queue *) data;
+	struct request_queue *q =
+		container_of(work, struct request_queue, timeout_work);
 	unsigned long flags, next = 0;
 	struct request *rq, *tmp;
 	int next_set = 0;
 
+	if (blk_queue_enter(q, true))
+		return;
+
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@@ -143,6 +146,7 @@ void blk_rq_timed_out_timer(unsigned long data)
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_queue_exit(q);
 }
 
 /**
@@ -193,9 +197,6 @@ void blk_add_timer(struct request *req)
 	struct request_queue *q = req->q;
 	unsigned long expiry;
 
-	if (req->cmd_flags & REQ_NO_TIMEOUT)
-		return;
-
 	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
 	if (!q->mq_ops && !q->rq_timed_out_fn)
 		return;
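Taken together with the blk-core.c and blk-mq.c hunks above, the timeout path now has the same two-stage shape everywhere: the timer callback runs in atomic context and only kicks a work item, and the work item pins the queue before scanning it, so the scan can no longer race with queue teardown. A condensed restatement of the pattern (this is the code above, trimmed, not a new API):

	/* timer context: no sleeping allowed, so just punt to kblockd */
	static void blk_rq_timed_out_timer(unsigned long data)
	{
		struct request_queue *q = (struct request_queue *)data;

		kblockd_schedule_work(&q->timeout_work);
	}

	/* process context: safe to take a queue usage reference */
	void blk_timeout_work(struct work_struct *work)
	{
		struct request_queue *q =
			container_of(work, struct request_queue, timeout_work);

		if (blk_queue_enter(q, true))	/* queue dying: nothing to expire */
			return;
		/* ... walk q->timeout_list, re-arm q->timeout ... */
		blk_queue_exit(q);
	}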
--- a/block/blk.h
+++ b/block/blk.h
@@ -93,7 +93,7 @@ static inline void blk_flush_integrity(void)
 }
 #endif
 
-void blk_rq_timed_out_timer(unsigned long data);
+void blk_timeout_work(struct work_struct *work);
 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);
 void blk_delete_timer(struct request *);
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -8,3 +8,14 @@ config BLK_DEV_NVME
 	  To compile this driver as a module, choose M here: the
 	  module will be called nvme.
+
+config BLK_DEV_NVME_SCSI
+	bool "SCSI emulation for NVMe device nodes"
+	depends on BLK_DEV_NVME
+	---help---
+	  This adds support for the SG_IO ioctl on the NVMe character
+	  and block device nodes, as well as a translation for a small
+	  number of selected SCSI commands to NVMe commands to the NVMe
+	  driver.  If you don't know what this means you probably want
+	  to say N here, and if you know what it means you probably
+	  want to say N as well.
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
 obj-$(CONFIG_BLK_DEV_NVME)	+= nvme.o
 
 lightnvm-$(CONFIG_NVM)	:= lightnvm.o
-nvme-y		+= pci.o scsi.o $(lightnvm-y)
+nvme-y		+= core.o pci.o $(lightnvm-y)
+nvme-$(CONFIG_BLK_DEV_NVME_SCSI)	+= scsi.o
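The Makefile hunk makes scsi.o conditional on the new option. The usual companion to such a split is a stubbed prototype so that callers compile either way; a hedged sketch of that idiom (the stub body returning -ENOTTY is an assumption about this series, shown for illustration only):

	#ifdef CONFIG_BLK_DEV_NVME_SCSI
	int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
	#else
	static inline int nvme_sg_io(struct nvme_ns *ns,
			struct sg_io_hdr __user *u_hdr)
	{
		return -ENOTTY;	/* SG_IO support not compiled in */
	}
	#endif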
(large diff collapsed in the original view: drivers/nvme/host/core.c, the new shared core)
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -294,7 +294,6 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
 {
 	struct nvme_ns *ns = nvmdev->q->queuedata;
-	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_id *nvme_nvm_id;
 	struct nvme_nvm_command c = {};
 	int ret;
@@ -307,7 +306,7 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
 	if (!nvme_nvm_id)
 		return -ENOMEM;
 
-	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
 				nvme_nvm_id, sizeof(struct nvme_nvm_id));
 	if (ret) {
 		ret = -EIO;
@@ -332,9 +331,8 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
 				nvm_l2p_update_fn *update_l2p, void *priv)
 {
 	struct nvme_ns *ns = nvmdev->q->queuedata;
-	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_command c = {};
-	u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
+	u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
 	u32 nlb_pr_rq = len / sizeof(u64);
 	u64 cmd_slba = slba;
 	void *entries;
@@ -352,10 +350,10 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
 		c.l2p.slba = cpu_to_le64(cmd_slba);
 		c.l2p.nlb = cpu_to_le32(cmd_nlb);
 
-		ret = nvme_submit_sync_cmd(dev->admin_q,
+		ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
 				(struct nvme_command *)&c, entries, len);
 		if (ret) {
-			dev_err(dev->dev, "L2P table transfer failed (%d)\n",
+			dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
 									ret);
 			ret = -EIO;
 			goto out;
@@ -381,7 +379,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 {
 	struct request_queue *q = nvmdev->q;
 	struct nvme_ns *ns = q->queuedata;
-	struct nvme_dev *dev = ns->dev;
+	struct nvme_ctrl *ctrl = ns->ctrl;
 	struct nvme_nvm_command c = {};
 	struct nvme_nvm_bb_tbl *bb_tbl;
 	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
@@ -395,30 +393,30 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 	if (!bb_tbl)
 		return -ENOMEM;
 
-	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
 								bb_tbl, tblsz);
 	if (ret) {
-		dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
+		dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
 		ret = -EIO;
 		goto out;
 	}
 
 	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
 		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
-		dev_err(dev->dev, "bbt format mismatch\n");
+		dev_err(ctrl->dev, "bbt format mismatch\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (le16_to_cpu(bb_tbl->verid) != 1) {
 		ret = -EINVAL;
-		dev_err(dev->dev, "bbt version not supported\n");
+		dev_err(ctrl->dev, "bbt version not supported\n");
 		goto out;
 	}
 
 	if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
 		ret = -EINVAL;
-		dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)",
+		dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
 					le32_to_cpu(bb_tbl->tblks), nr_blocks);
 		goto out;
 	}
@@ -434,7 +432,6 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
 								int type)
 {
 	struct nvme_ns *ns = nvmdev->q->queuedata;
-	struct nvme_dev *dev = ns->dev;
 	struct nvme_nvm_command c = {};
 	int ret = 0;
@@ -444,10 +441,10 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
 	c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
 	c.set_bb.value = type;
 
-	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
 								NULL, 0);
 	if (ret)
-		dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
+		dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
 	return ret;
 }
@@ -532,9 +529,8 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
 static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
 {
 	struct nvme_ns *ns = nvmdev->q->queuedata;
-	struct nvme_dev *dev = ns->dev;
 
-	return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
+	return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
 }
 
 static void nvme_nvm_destroy_dma_pool(void *pool)
@@ -592,8 +588,9 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
 
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
-	struct nvme_dev *dev = ns->dev;
-	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	struct nvme_ctrl *ctrl = ns->ctrl;
+	/* XXX: this is poking into PCI structures from generic code! */
+	struct pci_dev *pdev = to_pci_dev(ctrl->dev);
 
 	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
 	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
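The pattern repeated through this file is the core of the split: helpers no longer reach the admin queue through the PCI-specific struct nvme_dev but through the transport-neutral struct nvme_ctrl hanging off the namespace. Condensed before/after:

	/* before: tied to the PCI-specific object */
	struct nvme_dev *dev = ns->dev;
	ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
				buf, len);

	/* after: only the generic controller is needed */
	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				buf, len);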
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -19,58 +19,77 @@
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
 
+enum {
+	/*
+	 * Driver internal status code for commands that were cancelled due
+	 * to timeouts or controller shutdown.  The value is negative so
+	 * that it a) doesn't overlap with the unsigned hardware error codes,
+	 * and b) can easily be tested for.
+	 */
+	NVME_SC_CANCELLED		= -EINTR,
+};
+
 extern unsigned char nvme_io_timeout;
 #define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)
 
+extern unsigned char admin_timeout;
+#define ADMIN_TIMEOUT	(admin_timeout * HZ)
+
+extern unsigned char shutdown_timeout;
+#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)
+
 enum {
 	NVME_NS_LBA		= 0,
 	NVME_NS_LIGHTNVM	= 1,
 };
 
 /*
- * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+ * List of workarounds for devices that required behavior not specified in
+ * the standard.
  */
-struct nvme_dev {
-	struct list_head node;
-	struct nvme_queue **queues;
+enum nvme_quirks {
+	/*
+	 * Prefers I/O aligned to a stripe size specified in a vendor
+	 * specific Identify field.
+	 */
+	NVME_QUIRK_STRIPE_SIZE		= (1 << 0),
+
+	/*
+	 * The controller doesn't handle Identify values other than 0 or 1
+	 * correctly.
+	 */
+	NVME_QUIRK_IDENTIFY_CNS		= (1 << 1),
+};
+
+struct nvme_ctrl {
+	const struct nvme_ctrl_ops *ops;
 	struct request_queue *admin_q;
-	struct blk_mq_tag_set tagset;
-	struct blk_mq_tag_set admin_tagset;
-	u32 __iomem *dbs;
 	struct device *dev;
-	struct dma_pool *prp_page_pool;
-	struct dma_pool *prp_small_pool;
+	struct kref kref;
 	int instance;
-	unsigned queue_count;
-	unsigned online_queues;
-	unsigned max_qid;
-	int q_depth;
-	u32 db_stride;
-	u32 ctrl_config;
-	struct msix_entry *entry;
-	struct nvme_bar __iomem *bar;
+	struct blk_mq_tag_set *tagset;
 	struct list_head namespaces;
-	struct kref kref;
-	struct device *device;
-	struct work_struct reset_work;
-	struct work_struct probe_work;
-	struct work_struct scan_work;
+	struct mutex namespaces_mutex;
+	struct device *device;	/* char device */
+	struct list_head node;
+
 	char name[12];
 	char serial[20];
 	char model[40];
 	char firmware_rev[8];
-	bool subsystem;
+
+	u32 ctrl_config;
+
+	u32 page_size;
 	u32 max_hw_sectors;
 	u32 stripe_size;
-	u32 page_size;
-	void __iomem *cmb;
-	dma_addr_t cmb_dma_addr;
-	u64 cmb_size;
-	u32 cmbsz;
 	u16 oncs;
-	u16 abort_limit;
+	atomic_t abort_limit;
 	u8 event_limit;
 	u8 vwc;
+	u32 vs;
+	bool subsystem;
+	unsigned long quirks;
 };
 
 /*
@@ -79,11 +98,14 @@ struct nvme_dev {
 struct nvme_ns {
 	struct list_head list;
 
-	struct nvme_dev *dev;
+	struct nvme_ctrl *ctrl;
 	struct request_queue *queue;
 	struct gendisk *disk;
 	struct kref kref;
 
+	u8 eui[8];
+	u8 uuid[16];
+
 	unsigned ns_id;
 	int lba_shift;
 	u16 ms;
@@ -94,41 +116,156 @@ struct nvme_ns {
 	u32 mode_select_block_len;
 };
 
-/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries.  You can't see it in this data structure because C doesn't let
- * me express that.  Use nvme_alloc_iod to ensure there's enough space
- * allocated to store the PRP list.
- */
-struct nvme_iod {
-	unsigned long private;	/* For the use of the submitter of the I/O */
-	int npages;		/* In the PRP list. 0 means small pool in use */
-	int offset;		/* Of PRP list */
-	int nents;		/* Used in scatterlist */
-	int length;		/* Of data, in bytes */
-	dma_addr_t first_dma;
-	struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
-	struct scatterlist sg[0];
+struct nvme_ctrl_ops {
+	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
+	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+	bool (*io_incapable)(struct nvme_ctrl *ctrl);
+	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
+	void (*free_ctrl)(struct nvme_ctrl *ctrl);
 };
 
+static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
+{
+	u32 val = 0;
+
+	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
+		return false;
+	return val & NVME_CSTS_RDY;
+}
+
+static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
+{
+	u32 val = 0;
+
+	if (ctrl->ops->io_incapable(ctrl))
+		return true;
+	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
+		return false;
+	return val & NVME_CSTS_CFS;
+}
+
+static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
+{
+	if (!ctrl->subsystem)
+		return -ENOTTY;
+	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+}
+
 static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
 {
 	return (sector >> (ns->lba_shift - 9));
 }
 
+static inline void nvme_setup_flush(struct nvme_ns *ns,
+		struct nvme_command *cmnd)
+{
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->common.opcode = nvme_cmd_flush;
+	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+}
+
+static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
+		struct nvme_command *cmnd)
+{
+	u16 control = 0;
+	u32 dsmgmt = 0;
+
+	if (req->cmd_flags & REQ_FUA)
+		control |= NVME_RW_FUA;
+	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+		control |= NVME_RW_LR;
+
+	if (req->cmd_flags & REQ_RAHEAD)
+		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+	memset(cmnd, 0, sizeof(*cmnd));
+	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+	cmnd->rw.command_id = req->tag;
+	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+	if (ns->ms) {
+		switch (ns->pi_type) {
+		case NVME_NS_DPS_PI_TYPE3:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD;
+			break;
+		case NVME_NS_DPS_PI_TYPE1:
+		case NVME_NS_DPS_PI_TYPE2:
+			control |= NVME_RW_PRINFO_PRCHK_GUARD |
+					NVME_RW_PRINFO_PRCHK_REF;
+			cmnd->rw.reftag = cpu_to_le32(
+					nvme_block_nr(ns, blk_rq_pos(req)));
+			break;
+		}
+		if (!blk_integrity_rq(req))
+			control |= NVME_RW_PRINFO_PRACT;
+	}
+
+	cmnd->rw.control = cpu_to_le16(control);
+	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+}
+
+static inline int nvme_error_status(u16 status)
+{
+	switch (status & 0x7ff) {
+	case NVME_SC_SUCCESS:
+		return 0;
+	case NVME_SC_CAP_EXCEEDED:
+		return -ENOSPC;
+	default:
+		return -EIO;
+	}
+}
+
+static inline bool nvme_req_needs_retry(struct request *req, u16 status)
+{
+	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
+		(jiffies - req->start_time) < req->timeout;
+}
+
+int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
+int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
+int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+		const struct nvme_ctrl_ops *ops, unsigned long quirks);
+void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
+void nvme_put_ctrl(struct nvme_ctrl *ctrl);
+int nvme_init_identify(struct nvme_ctrl *ctrl);
+
+void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
+void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+
+void nvme_stop_queues(struct nvme_ctrl *ctrl);
+void nvme_start_queues(struct nvme_ctrl *ctrl);
+
+struct request *nvme_alloc_request(struct request_queue *q,
+		struct nvme_command *cmd, unsigned int flags);
+void nvme_requeue_req(struct request *req);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		void *buffer, void __user *ubuffer, unsigned bufflen,
+		void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
+int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
+		void __user *ubuffer, unsigned bufflen, u32 *result,
+		unsigned timeout);
+int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
+		void __user *ubuffer, unsigned bufflen,
 		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
 		u32 *result, unsigned timeout);
-int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
-int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
+int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
+int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
 		struct nvme_id_ns **id);
-int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
-int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
+int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 		dma_addr_t dma_addr, u32 *result);
-int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 		dma_addr_t dma_addr, u32 *result);
+int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
+
+extern spinlock_t dev_list_lock;
 
 struct sg_io_hdr;
@@ -154,4 +291,7 @@ static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *i
 }
 #endif /* CONFIG_NVM */
 
+int __init nvme_core_init(void);
+void nvme_core_exit(void);
+
 #endif /* _NVME_H */
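The nvme_ctrl_ops table is what keeps the new core transport-neutral: every BAR access in common code goes through reg_read32()/reg_write32(), as nvme_ctrl_ready() above shows. A hedged sketch of a memory-mapped (PCIe-style) implementation of the two 32-bit callbacks; the example_* names are illustrative, not the pci.c code from this series:

	/* example_ctrl() stands in for a container_of() helper that maps the
	 * generic nvme_ctrl back to the transport's private structure holding
	 * the ioremap()ed 'bar' -- both are hypothetical here. */
	static int example_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
	{
		*val = readl(example_ctrl(ctrl)->bar + off);
		return 0;
	}

	static int example_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
	{
		writel(val, example_ctrl(ctrl)->bar + off);
		return 0;
	}

	static const struct nvme_ctrl_ops example_pci_ctrl_ops = {
		.reg_read32	= example_reg_read32,
		.reg_write32	= example_reg_write32,
		/* .reg_read64, .io_incapable, .reset_ctrl, .free_ctrl ... */
	};

This indirection is what lets the shared core stay ignorant of how registers are reached, which is the point of the split.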
(large diff collapsed in the original view: drivers/nvme/host/pci.c)
(large diff collapsed in the original view: drivers/nvme/host/scsi.c)
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -615,9 +615,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
 	}
 
 	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
-	if (!bip) {
+	if (IS_ERR(bip)) {
 		pr_err("Unable to allocate bio_integrity_payload\n");
-		return -ENOMEM;
+		return PTR_ERR(bip);
 	}
 
 	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -7,6 +7,7 @@
 #ifndef _AER_H_
 #define _AER_H_
 
+#include <linux/errno.h>
 #include <linux/types.h>
 
 #define AER_NONFATAL			0
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -318,16 +318,6 @@ enum bip_flags {
 	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
 };
 
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
-{
-	if (bio->bi_rw & REQ_INTEGRITY)
-		return bio->bi_integrity;
-	return NULL;
-}
-
 /*
  * bio integrity payload
  */
@@ -349,6 +339,16 @@ struct bio_integrity_payload {
 	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
 };
 
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+	if (bio->bi_rw & REQ_INTEGRITY)
+		return bio->bi_integrity;
+	return NULL;
+}
+
 static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
 {
 	struct bio_integrity_payload *bip = bio_integrity(bio);
@@ -795,6 +795,18 @@ static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
 	return false;
 }
 
+static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
+					unsigned int nr)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
+					unsigned int len, unsigned int offset)
+{
+	return 0;
+}
+
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
 #endif /* CONFIG_BLOCK */
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -188,7 +188,6 @@ enum rq_flag_bits {
 	__REQ_PM,		/* runtime pm request */
 	__REQ_HASHED,		/* on IO scheduler merge hash */
 	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
-	__REQ_NO_TIMEOUT,	/* requests may never expire */
 	__REQ_NR_BITS,		/* stops here */
 };
@@ -242,7 +241,6 @@ enum rq_flag_bits {
 #define REQ_PM			(1ULL << __REQ_PM)
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
-#define REQ_NO_TIMEOUT		(1ULL << __REQ_NO_TIMEOUT)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE	-1U
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -409,6 +409,7 @@ struct request_queue {
 
 	unsigned int		rq_timeout;
 	struct timer_list	timeout;
+	struct work_struct	timeout_work;
 	struct list_head	timeout_list;
 
 	struct list_head	icq_list;
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -17,20 +17,19 @@
 
 #include <linux/types.h>
 
-struct nvme_bar {
-	__u64			cap;	/* Controller Capabilities */
-	__u32			vs;	/* Version */
-	__u32			intms;	/* Interrupt Mask Set */
-	__u32			intmc;	/* Interrupt Mask Clear */
-	__u32			cc;	/* Controller Configuration */
-	__u32			rsvd1;	/* Reserved */
-	__u32			csts;	/* Controller Status */
-	__u32			nssr;	/* Subsystem Reset */
-	__u32			aqa;	/* Admin Queue Attributes */
-	__u64			asq;	/* Admin SQ Base Address */
-	__u64			acq;	/* Admin CQ Base Address */
-	__u32			cmbloc; /* Controller Memory Buffer Location */
-	__u32			cmbsz;	/* Controller Memory Buffer Size */
+enum {
+	NVME_REG_CAP	= 0x0000,	/* Controller Capabilities */
+	NVME_REG_VS	= 0x0008,	/* Version */
+	NVME_REG_INTMS	= 0x000c,	/* Interrupt Mask Set */
+	NVME_REG_INTMC	= 0x0010,	/* Interrupt Mask Clear */
+	NVME_REG_CC	= 0x0014,	/* Controller Configuration */
+	NVME_REG_CSTS	= 0x001c,	/* Controller Status */
+	NVME_REG_NSSR	= 0x0020,	/* NVM Subsystem Reset */
+	NVME_REG_AQA	= 0x0024,	/* Admin Queue Attributes */
+	NVME_REG_ASQ	= 0x0028,	/* Admin SQ Base Address */
+	NVME_REG_ACQ	= 0x0030,	/* Admin CQ Base Address */
+	NVME_REG_CMBLOC = 0x0038,	/* Controller Memory Buffer Location */
+	NVME_REG_CMBSZ	= 0x003c,	/* Controller Memory Buffer Size */
 };
 
 #define NVME_CAP_MQES(cap)	((cap) & 0xffff)
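With offsets instead of a struct, a register read is plain pointer arithmetic on the mapped window, and the same constants feed the reg_read32()-style accessors in nvme.h. Illustrative use, assuming 'bar' is the ioremap()ed BAR (the helper name is hypothetical):

	static bool example_ctrl_ready(void __iomem *bar)
	{
		/* was: readl(&bar->csts) with a struct nvme_bar __iomem *bar */
		return readl(bar + NVME_REG_CSTS) & NVME_CSTS_RDY;
	}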
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -307,7 +307,7 @@ header-y += nfs_mount.h
 header-y += nl80211.h
 header-y += n_r3964.h
 header-y += nubus.h
-header-y += nvme.h
+header-y += nvme_ioctl.h
 header-y += nvram.h
 header-y += omap3isp.h
 header-y += omapfb.h