Commit ecd06f28 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A set of fixes that missed the merge window, mostly due to me being
  away around that time.

  Nothing major here, a mix of nvme cleanups and fixes, and one fix for
  the badblocks handling"

* 'for-linus' of git://git.kernel.dk/linux-block:
  nvmet: use symbolic constants for CNS values
  nvme: use symbolic constants for CNS values
  nvme.h: add an enum for cns values
  nvme.h: don't use uuid_be
  nvme.h: resync with nvme-cli
  nvme: Add tertiary number to NVME_VS
  nvme : Add sysfs entry for NVMe CMBs when appropriate
  nvme: don't schedule multiple resets
  nvme: Delete created IO queues on reset
  nvme: Stop probing a removed device
  badblocks: fix overlapping check for clearing
parents e59f30b4 e9c9346e
block/badblocks.c

@@ -354,7 +354,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
 		 * current range. Earlier ranges could also overlap,
 		 * but only this one can overlap the end of the range.
 		 */
-		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
+		if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) &&
+		    (BB_OFFSET(p[lo]) < target)) {
 			/* Partial overlap, leave the tail of this range */
 			int ack = BB_ACK(p[lo]);
 			sector_t a = BB_OFFSET(p[lo]);
@@ -377,7 +378,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
 			lo--;
 		}
 		while (lo >= 0 &&
-		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
+		       (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) &&
+		       (BB_OFFSET(p[lo]) < target)) {
 			/* This range does overlap */
 			if (BB_OFFSET(p[lo]) < s) {
 				/* Keep the early parts of this range. */
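The added condition is the actual fix: the old one-sided test matched any bad range that merely ended past the start of the cleared region, so a range lying entirely beyond the clear could be wrongly trimmed. A standalone sketch of the corrected predicate, with the BB_* packing simplified to a plain struct (the real accessors pack offset, length, and an acknowledged bit into one u64):

#include <assert.h>

/* Simplified stand-in for a kernel bad-block record; the real code
 * packs offset, length, and an "acknowledged" bit into a u64 and
 * reads them back with BB_OFFSET()/BB_LEN()/BB_ACK(). */
struct bb_range {
	unsigned long long offset;
	int len;
};

/* Corrected overlap test from the while loop: the range only overlaps
 * the cleared region [s, target) if it ends after s AND starts before
 * target. */
static int overlaps(struct bb_range r, unsigned long long s,
		    unsigned long long target)
{
	return (r.offset + r.len > s) && (r.offset < target);
}

int main(void)
{
	struct bb_range bad = { .offset = 10, .len = 8 };	/* sectors [10,18) */

	/* Clearing [12,16) really does overlap the middle of [10,18). */
	assert(overlaps(bad, 12, 16));
	/* Clearing [0,8) ends before the bad range starts: the old
	 * one-sided test (offset + len > s) was true here and wrongly
	 * trimmed the range; the added (offset < target) term rejects it. */
	assert(!overlaps(bad, 0, 8));
	return 0;
}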
drivers/nvme/host/core.c

@@ -554,7 +554,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
 
 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
 	c.identify.opcode = nvme_admin_identify;
-	c.identify.cns = cpu_to_le32(1);
+	c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL);
 
 	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
 	if (!*id)
@@ -572,7 +572,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
 	struct nvme_command c = { };
 
 	c.identify.opcode = nvme_admin_identify;
-	c.identify.cns = cpu_to_le32(2);
+	c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST);
 	c.identify.nsid = cpu_to_le32(nsid);
 
 	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
 }
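Both hunks are mechanical substitutions: the new NVME_ID_CNS_* enum (added to include/linux/nvme.h further down in this diff) encodes exactly the CNS literals it replaces, which a compile-time check makes plain:

/* Minimal excerpt of the enum added in this series, with asserts
 * confirming the symbolic names equal the magic numbers they replace. */
enum {
	NVME_ID_CNS_NS			= 0x00,
	NVME_ID_CNS_CTRL		= 0x01,
	NVME_ID_CNS_NS_ACTIVE_LIST	= 0x02,
};

_Static_assert(NVME_ID_CNS_CTRL == 1, "Identify Controller used cns=1");
_Static_assert(NVME_ID_CNS_NS_ACTIVE_LIST == 2, "Active NS list used cns=2");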
@@ -900,9 +900,9 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
 		return -ENODEV;
 	}
 
-	if (ns->ctrl->vs >= NVME_VS(1, 1))
+	if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
 		memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
-	if (ns->ctrl->vs >= NVME_VS(1, 2))
+	if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
 		memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
 
 	return 0;
@@ -1086,6 +1086,8 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
 	int ret;
 
 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+		if (csts == ~0)
+			return -ENODEV;
 		if ((csts & NVME_CSTS_RDY) == bit)
 			break;
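The new check exploits PCIe semantics: MMIO reads from a surprise-removed device complete with all bits set rather than faulting, so a CSTS value of ~0 means the controller is gone, not merely "not ready yet". A tiny illustration of the test:

#include <stdint.h>
#include <stdio.h>

/* A removed PCIe device doesn't fault MMIO reads; they return all
 * ones. Any real CSTS value has reserved bits that read as zero, so
 * 0xffffffff is safe to treat as "device gone". */
static int controller_present(uint32_t csts)
{
	return csts != 0xffffffffu;
}

int main(void)
{
	printf("CSTS 0x00000001 -> present=%d\n", controller_present(0x1));
	printf("CSTS 0xffffffff -> present=%d\n",
	       controller_present(0xffffffffu));
	return 0;
}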
@@ -1240,7 +1242,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	}
 	page_shift = NVME_CAP_MPSMIN(cap) + 12;
 
-	if (ctrl->vs >= NVME_VS(1, 1))
+	if (ctrl->vs >= NVME_VS(1, 1, 0))
 		ctrl->subsystem = NVME_CAP_NSSRC(cap);
 
 	ret = nvme_identify_ctrl(ctrl, &id);
@@ -1840,7 +1842,7 @@ static void nvme_scan_work(struct work_struct *work)
 		return;
 
 	nn = le32_to_cpu(id->nn);
-	if (ctrl->vs >= NVME_VS(1, 1) &&
+	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
 	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
 		if (!nvme_scan_ns_list(ctrl, nn))
 			goto done;
drivers/nvme/host/pci.c

@@ -99,6 +99,7 @@ struct nvme_dev {
 	dma_addr_t cmb_dma_addr;
 	u64 cmb_size;
 	u32 cmbsz;
+	u32 cmbloc;
 	struct nvme_ctrl ctrl;
 	struct completion ioq_wait;
 };
@@ -893,7 +894,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		 "I/O %d QID %d timeout, reset controller\n",
 		 req->tag, nvmeq->qid);
 	nvme_dev_disable(dev, false);
-	queue_work(nvme_workq, &dev->reset_work);
+	nvme_reset(dev);
 
 	/*
 	 * Mark the request as handled, since the inline shutdown
@@ -1214,7 +1215,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 	struct nvme_queue *nvmeq;
 
-	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
+	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
 						NVME_CAP_NSSRC(cap) : 0;
 
 	if (dev->subsystem &&
@@ -1291,7 +1292,7 @@ static void nvme_watchdog_timer(unsigned long data)
 
 	/* Skip controllers under certain specific conditions. */
 	if (nvme_should_reset(dev, csts)) {
-		if (queue_work(nvme_workq, &dev->reset_work))
+		if (!nvme_reset(dev))
 			dev_warn(dev->dev,
 				"Failed status: 0x%x, reset controller.\n",
 				csts);
@@ -1331,28 +1332,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 	return ret >= 0 ? 0 : ret;
 }
 
+static ssize_t nvme_cmb_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+	return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
+		       ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
+
 static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 {
 	u64 szu, size, offset;
-	u32 cmbloc;
 	resource_size_t bar_size;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	void __iomem *cmb;
 	dma_addr_t dma_addr;
 
-	if (!use_cmb_sqes)
-		return NULL;
-
 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
 	if (!(NVME_CMB_SZ(dev->cmbsz)))
 		return NULL;
+	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
 
-	cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
+	if (!use_cmb_sqes)
+		return NULL;
 
 	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
 	size = szu * NVME_CMB_SZ(dev->cmbsz);
-	offset = szu * NVME_CMB_OFST(cmbloc);
-	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+	offset = szu * NVME_CMB_OFST(dev->cmbloc);
+	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
 
 	if (offset > bar_size)
 		return NULL;
@@ -1365,7 +1375,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 	if (size > bar_size - offset)
 		size = bar_size - offset;
 
-	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
 	cmb = ioremap_wc(dma_addr, size);
 	if (!cmb)
 		return NULL;
@@ -1511,9 +1521,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	return 0;
 }
 
-static void nvme_disable_io_queues(struct nvme_dev *dev)
+static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
 {
-	int pass, queues = dev->online_queues - 1;
+	int pass;
 	unsigned long timeout;
 	u8 opcode = nvme_admin_delete_sq;
@@ -1616,9 +1626,25 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 			dev->q_depth);
 	}
 
-	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
+	/*
+	 * CMBs can currently only exist on >=1.2 PCIe devices. We only
+	 * populate sysfs if a CMB is implemented. Note that we add the
+	 * CMB attribute to the nvme_ctrl kobj which removes the need to remove
+	 * it on exit. Since nvme_dev_attrs_group has no name we can pass
+	 * NULL as final argument to sysfs_add_file_to_group.
+	 */
+	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
 		dev->cmb = nvme_map_cmb(dev);
+		if (dev->cmbsz) {
+			if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+						    &dev_attr_cmb.attr, NULL))
+				dev_warn(dev->dev,
+					 "failed to add sysfs attribute for CMB\n");
+		}
+	}
 
 	pci_enable_pcie_error_reporting(pdev);
 	pci_save_state(pdev);
 	return 0;
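With the attribute registered on the controller's class device, the raw CMBLOC/CMBSZ register values become visible from userspace. A sketch of reading them, where the /sys/class/nvme/nvme0/cmb path assumes a first controller named nvme0:

#include <stdio.h>

int main(void)
{
	char line[128];
	/* nvme0 is an example name; substitute the controller present
	 * on the system. The file only exists when a CMB is implemented. */
	FILE *f = fopen("/sys/class/nvme/nvme0/cmb", "r");

	if (!f) {
		perror("cmb attribute");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* two lines: cmbloc and cmbsz */
	fclose(f);
	return 0;
}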
@@ -1649,7 +1675,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
-	int i;
+	int i, queues;
 	u32 csts = -1;
 
 	del_timer_sync(&dev->watchdog_timer);
@@ -1660,6 +1686,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		csts = readl(dev->bar + NVME_REG_CSTS);
 	}
 
+	queues = dev->online_queues - 1;
 	for (i = dev->queue_count - 1; i > 0; i--)
 		nvme_suspend_queue(dev->queues[i]);
@@ -1671,7 +1698,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		if (dev->queue_count)
 			nvme_suspend_queue(dev->queues[0]);
 	} else {
-		nvme_disable_io_queues(dev);
+		nvme_disable_io_queues(dev, queues);
 		nvme_disable_admin_queue(dev, shutdown);
 	}
 	nvme_pci_disable(dev);
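The reason for threading queues through as a parameter is ordering: nvme_suspend_queue() decrements dev->online_queues, so by the time nvme_disable_io_queues() used to read it, the count of I/O queues still needing delete commands was already gone. A toy model of the hazard:

#include <assert.h>

/* Toy model: suspend drops the online count, so the number of I/O
 * queues to delete must be sampled before the suspend loop, which is
 * exactly what "queues = dev->online_queues - 1" now does. */
static int online_queues = 5;	/* one admin queue + four I/O queues */

static void suspend_queues(void)
{
	while (online_queues > 0)
		online_queues--;
}

int main(void)
{
	int queues = online_queues - 1;	/* sample first: 4 I/O queues */

	suspend_queues();
	assert(queues == 4);		/* the correct count survives */
	assert(online_queues - 1 != 4);	/* a late read would see none */
	return 0;
}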
@@ -1818,11 +1845,10 @@ static int nvme_reset(struct nvme_dev *dev)
 {
 	if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
 		return -ENODEV;
-	if (work_busy(&dev->reset_work))
-		return -ENODEV;
 	if (!queue_work(nvme_workq, &dev->reset_work))
 		return -EBUSY;
-	flush_work(&dev->reset_work);
 	return 0;
 }
@@ -1846,7 +1872,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 
 static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
 {
-	return nvme_reset(to_nvme_dev(ctrl));
+	struct nvme_dev *dev = to_nvme_dev(ctrl);
+	int ret = nvme_reset(dev);
+
+	if (!ret)
+		flush_work(&dev->reset_work);
+	return ret;
 }
 
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
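This reshuffle keeps nvme_reset() purely asynchronous: queue_work() already returns false when the work item is pending, which replaces the racy work_busy() check, and the blocking flush_work() moves into the one caller that needs to wait. A standalone sketch of the queue/flush split:

#include <stdbool.h>
#include <stdio.h>

static bool reset_pending;

static bool queue_reset_work(void)	/* stand-in for queue_work() */
{
	if (reset_pending)
		return false;		/* duplicate: rejected */
	reset_pending = true;
	return true;
}

static void flush_reset_work(void)	/* stand-in for flush_work() */
{
	reset_pending = false;		/* pretend the work ran */
}

/* Async entry point: only queues, relying on "already pending"
 * detection to deduplicate concurrent reset requests. */
static int reset_async(void)
{
	return queue_reset_work() ? 0 : -1;	/* -EBUSY in the driver */
}

/* Sync wrapper: waits only when it actually queued the work. */
static int reset_sync(void)
{
	int ret = reset_async();

	if (!ret)
		flush_reset_work();
	return ret;
}

int main(void)
{
	printf("first reset:  %d\n", reset_sync());	/* 0 */
	reset_async();					/* queues one */
	printf("second reset: %d\n", reset_async());	/* -1: duplicate */
	return 0;
}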
@@ -1940,7 +1971,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 	if (prepare)
 		nvme_dev_disable(dev, false);
 	else
-		queue_work(nvme_workq, &dev->reset_work);
+		nvme_reset(dev);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
@@ -2009,7 +2040,7 @@ static int nvme_resume(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-	queue_work(nvme_workq, &ndev->reset_work);
+	nvme_reset(ndev);
 	return 0;
 }
 #endif
@@ -2048,7 +2079,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
 
 	dev_info(dev->ctrl.device, "restart after slot reset\n");
 	pci_restore_state(pdev);
-	queue_work(nvme_workq, &dev->reset_work);
+	nvme_reset(dev);
 	return PCI_ERS_RESULT_RECOVERED;
 }
drivers/nvme/host/scsi.c

@@ -606,7 +606,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len)
 	eui = id_ns->eui64;
 	len = sizeof(id_ns->eui64);
 
-	if (ns->ctrl->vs >= NVME_VS(1, 2)) {
+	if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) {
 		if (bitmap_empty(eui, len * 8)) {
 			eui = id_ns->nguid;
 			len = sizeof(id_ns->nguid);
@@ -679,7 +679,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 *resp, int alloc_len)
 {
 	int res;
 
-	if (ns->ctrl->vs >= NVME_VS(1, 1)) {
+	if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) {
 		res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
 		if (res != -EOPNOTSUPP)
 			return res;
drivers/nvme/target/admin-cmd.c

@@ -199,7 +199,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	 */
 
 	/* we support multiple ports and multiples hosts: */
-	id->mic = (1 << 0) | (1 << 1);
+	id->cmic = (1 << 0) | (1 << 1);
 
 	/* no limit on data transfer sizes for now */
 	id->mdts = 0;
@@ -511,13 +511,13 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
 	case nvme_admin_identify:
 		req->data_len = 4096;
 		switch (le32_to_cpu(cmd->identify.cns)) {
-		case 0x00:
+		case NVME_ID_CNS_NS:
 			req->execute = nvmet_execute_identify_ns;
 			return 0;
-		case 0x01:
+		case NVME_ID_CNS_CTRL:
 			req->execute = nvmet_execute_identify_ctrl;
 			return 0;
-		case 0x02:
+		case NVME_ID_CNS_NS_ACTIVE_LIST:
 			req->execute = nvmet_execute_identify_nslist;
 			return 0;
 		}
drivers/nvme/target/core.c

@@ -882,7 +882,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 	if (!subsys)
 		return NULL;
 
-	subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */
+	subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
 
 	switch (type) {
 	case NVME_NQN_NVME:
drivers/nvme/target/discovery.c

@@ -54,7 +54,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
 	/* we support only dynamic controllers */
 	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
 	e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
-	e->nqntype = type;
+	e->subtype = type;
 	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
 	memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
 	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
@@ -187,7 +187,7 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
 	case nvme_admin_identify:
 		req->data_len = 4096;
 		switch (le32_to_cpu(cmd->identify.cns)) {
-		case 0x01:
+		case NVME_ID_CNS_CTRL:
 			req->execute =
 				nvmet_execute_identify_disc_ctrl;
 			return 0;
include/linux/nvme.h

@@ -16,7 +16,6 @@
 #define _LINUX_NVME_H
 
 #include <linux/types.h>
-#include <linux/uuid.h>
 
 /* NQN names in commands fields specified one size */
 #define NVMF_NQN_FIELD_LEN	256
@@ -182,7 +181,7 @@ struct nvme_id_ctrl {
 	char			fr[8];
 	__u8			rab;
 	__u8			ieee[3];
-	__u8			mic;
+	__u8			cmic;
 	__u8			mdts;
 	__le16			cntlid;
 	__le32			ver;
@@ -202,7 +201,13 @@ struct nvme_id_ctrl {
 	__u8			apsta;
 	__le16			wctemp;
 	__le16			cctemp;
-	__u8			rsvd270[50];
+	__le16			mtfa;
+	__le32			hmpre;
+	__le32			hmmin;
+	__u8			tnvmcap[16];
+	__u8			unvmcap[16];
+	__le32			rpmbs;
+	__u8			rsvd316[4];
 	__le16			kas;
 	__u8			rsvd322[190];
 	__u8			sqes;
@@ -267,7 +272,7 @@ struct nvme_id_ns {
 	__le16			nabo;
 	__le16			nabspf;
 	__u16			rsvd46;
-	__le64			nvmcap[2];
+	__u8			nvmcap[16];
 	__u8			rsvd64[40];
 	__u8			nguid[16];
 	__u8			eui64[8];
@@ -276,6 +281,16 @@ struct nvme_id_ns {
 	__u8			vs[3712];
 };
 
+enum {
+	NVME_ID_CNS_NS			= 0x00,
+	NVME_ID_CNS_CTRL		= 0x01,
+	NVME_ID_CNS_NS_ACTIVE_LIST	= 0x02,
+	NVME_ID_CNS_NS_PRESENT_LIST	= 0x10,
+	NVME_ID_CNS_NS_PRESENT		= 0x11,
+	NVME_ID_CNS_CTRL_NS_LIST	= 0x12,
+	NVME_ID_CNS_CTRL_LIST		= 0x13,
+};
+
 enum {
 	NVME_NS_FEAT_THIN	= 1 << 0,
 	NVME_NS_FLBAS_LBA_MASK	= 0xf,
@@ -556,8 +571,10 @@ enum nvme_admin_opcode {
 	nvme_admin_set_features		= 0x09,
 	nvme_admin_get_features		= 0x0a,
 	nvme_admin_async_event		= 0x0c,
+	nvme_admin_ns_mgmt		= 0x0d,
 	nvme_admin_activate_fw		= 0x10,
 	nvme_admin_download_fw		= 0x11,
+	nvme_admin_ns_attach		= 0x15,
 	nvme_admin_keep_alive		= 0x18,
 	nvme_admin_format_nvm		= 0x80,
 	nvme_admin_security_send	= 0x81,
@@ -583,6 +600,7 @@ enum {
 	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
 	NVME_FEAT_ASYNC_EVENT	= 0x0b,
 	NVME_FEAT_AUTO_PST	= 0x0c,
+	NVME_FEAT_HOST_MEM_BUF	= 0x0d,
 	NVME_FEAT_KATO		= 0x0f,
 	NVME_FEAT_SW_PROGRESS	= 0x80,
 	NVME_FEAT_HOST_ID	= 0x81,
@@ -745,7 +763,7 @@ struct nvmf_common_command {
 
 struct nvmf_disc_rsp_page_entry {
 	__u8		trtype;
 	__u8		adrfam;
-	__u8		nqntype;
+	__u8		subtype;
 	__u8		treq;
 	__le16		portid;
 	__le16		cntlid;
@@ -794,7 +812,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-	uuid_be		hostid;
+	__u8		hostid[16];
 	__le16		cntlid;
 	char		resv4[238];
 	char		subsysnqn[NVMF_NQN_FIELD_LEN];
@@ -905,12 +923,23 @@ enum {
 	NVME_SC_INVALID_VECTOR		= 0x108,
 	NVME_SC_INVALID_LOG_PAGE	= 0x109,
 	NVME_SC_INVALID_FORMAT		= 0x10a,
-	NVME_SC_FIRMWARE_NEEDS_RESET	= 0x10b,
+	NVME_SC_FW_NEEDS_CONV_RESET	= 0x10b,
 	NVME_SC_INVALID_QUEUE		= 0x10c,
 	NVME_SC_FEATURE_NOT_SAVEABLE	= 0x10d,
 	NVME_SC_FEATURE_NOT_CHANGEABLE	= 0x10e,
 	NVME_SC_FEATURE_NOT_PER_NS	= 0x10f,
-	NVME_SC_FW_NEEDS_RESET_SUBSYS	= 0x110,
+	NVME_SC_FW_NEEDS_SUBSYS_RESET	= 0x110,
+	NVME_SC_FW_NEEDS_RESET		= 0x111,
+	NVME_SC_FW_NEEDS_MAX_TIME	= 0x112,
+	NVME_SC_FW_ACIVATE_PROHIBITED	= 0x113,
+	NVME_SC_OVERLAPPING_RANGE	= 0x114,
+	NVME_SC_NS_INSUFFICENT_CAP	= 0x115,
+	NVME_SC_NS_ID_UNAVAILABLE	= 0x116,
+	NVME_SC_NS_ALREADY_ATTACHED	= 0x118,
+	NVME_SC_NS_IS_PRIVATE		= 0x119,
+	NVME_SC_NS_NOT_ATTACHED		= 0x11a,
+	NVME_SC_THIN_PROV_NOT_SUPP	= 0x11b,
+	NVME_SC_CTRL_LIST_INVALID	= 0x11c,
 
 	/*
 	 * I/O Command Set Specific - NVM commands:
@@ -941,6 +970,7 @@ enum {
 	NVME_SC_REFTAG_CHECK		= 0x284,
 	NVME_SC_COMPARE_FAILED		= 0x285,
 	NVME_SC_ACCESS_DENIED		= 0x286,
+	NVME_SC_UNWRITTEN_BLOCK		= 0x287,
 
 	NVME_SC_DNR			= 0x4000,
 };
@@ -960,6 +990,7 @@ struct nvme_completion {
 	__le16	status;		/* did the command fail, and if so, why? */
 };
 
-#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+#define NVME_VS(major, minor, tertiary) \
+	(((major) << 16) | ((minor) << 8) | (tertiary))
 
 #endif /* _LINUX_NVME_H */
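A zero tertiary value reproduces the old two-argument encoding, so every NVME_VS(x, y, 0) comparison above is bit-for-bit what NVME_VS(x, y) used to be, while the target can now report NVMe 1.2.1 precisely. A quick compile-time check of the arithmetic:

#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

/* major.minor.tertiary occupy bytes 2..0 of the version register. */
_Static_assert(NVME_VS(1, 2, 1) == 0x010201, "NVMe 1.2.1");
_Static_assert(NVME_VS(1, 2, 0) == ((1 << 16) | (2 << 8)),
	       "zero tertiary matches the old NVME_VS(1, 2) encoding");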