Commit 0151ef00 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A small set of fixes for -rc2 - two fixes for BFQ, documentation and
  code, and a removal of an unused variable in nbd. Outside of that, a
  small collection of fixes from the usual crew on the nvme side"

* 'for-linus' of git://git.kernel.dk/linux-block:
  nvmet: don't report 0-bytes in serial number
  nvmet: preserve controller serial number between reboots
  nvmet: Move serial number from controller to subsystem
  nvmet: prefix version configfs file with attr
  nvme-pci: Fix an error handling path in 'nvme_probe()'
  nvme-pci: Remove nvme_setup_prps BUG_ON
  nvme-pci: add another device ID with stripe quirk
  nvmet-fc: fix byte swapping in nvmet_fc_ls_create_association
  nvme: fix byte swapping in the streams code
  nbd: kill unused ret in recv_work
  bfq: dispatch request to prevent queue stalling after the request completion
  bfq: fix typos in comments about B-WF2Q+ algorithm
parents bb236dbe 42de82a8
@@ -4299,6 +4299,9 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
 			bfq_bfqq_expire(bfqd, bfqq, false,
 					BFQQE_NO_MORE_REQUESTS);
 	}
+
+	if (!bfqd->rq_in_driver)
+		bfq_schedule_dispatch(bfqd);
 }
...
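The hunk above guards against a stall: if the completed request was the last one the device was working on, and the in-service queue was just expired with no more requests, nothing would otherwise trigger another dispatch. A minimal user-space model of that logic (hypothetical names, not the BFQ data structures; in the driver, scheduling a dispatch ultimately re-runs the hardware queues):

/* Model of the stall the hunk fixes: completion must kick the
 * dispatch path when the last in-flight request finishes while
 * the scheduler still holds queued work.
 */
#include <stdio.h>

struct sched_data {
	int rq_in_driver;   /* requests currently owned by the device */
	int queued;         /* requests still waiting in the scheduler */
};

static void schedule_dispatch(struct sched_data *d)
{
	if (d->queued)
		puts("kick hardware queues");
}

static void completed_request(struct sched_data *d)
{
	d->rq_in_driver--;
	/* the fix: without this, a non-empty scheduler can stall */
	if (!d->rq_in_driver)
		schedule_dispatch(d);
}

int main(void)
{
	struct sched_data d = { .rq_in_driver = 1, .queued = 2 };

	completed_request(&d);
	return 0;
}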
@@ -52,7 +52,7 @@ struct bfq_entity;
 struct bfq_service_tree {
 	/* tree for active entities (i.e., those backlogged) */
 	struct rb_root active;
-	/* tree for idle entities (i.e., not backlogged, with V <= F_i)*/
+	/* tree for idle entities (i.e., not backlogged, with V < F_i)*/
 	struct rb_root idle;
 	/* idle entity with minimum F_i */
...
@@ -1297,7 +1297,7 @@ static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
  *
  * This function searches the first schedulable entity, starting from the
  * root of the tree and going on the left every time on this side there is
- * a subtree with at least one eligible (start >= vtime) entity. The path on
+ * a subtree with at least one eligible (start <= vtime) entity. The path on
  * the right is followed only if a) the left subtree contains no eligible
  * entities and b) no eligible entity has been found yet.
  */
...
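The corrected comments encode the two B-WF2Q+ invariants: an entity is eligible once its virtual start time has been reached (start <= vtime), while entities parked in the idle tree have finish times still ahead of the virtual clock (V < F_i). A small standalone sketch of those predicates (illustrative types, not the kernel's bfq_entity):

#include <stdbool.h>
#include <stdint.h>

struct entity {
	uint64_t start;   /* virtual start time  (S_i) */
	uint64_t finish;  /* virtual finish time (F_i) */
};

/* eligible: the virtual clock has reached the entity's start time */
static bool entity_is_eligible(const struct entity *e, uint64_t vtime)
{
	return e->start <= vtime;
}

/* idle tree membership: finish time still in the virtual future */
static bool belongs_in_idle_tree(const struct entity *e, uint64_t vtime)
{
	return vtime < e->finish;
}

int main(void)
{
	struct entity e = { .start = 10, .finish = 30 };

	/* at vtime 20 the entity is eligible and would be idle-parked */
	return !(entity_is_eligible(&e, 20) && belongs_in_idle_tree(&e, 20));
}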
@@ -626,7 +626,6 @@ static void recv_work(struct work_struct *work)
 	struct nbd_device *nbd = args->nbd;
 	struct nbd_config *config = nbd->config;
 	struct nbd_cmd *cmd;
-	int ret = 0;
 
 	while (1) {
 		cmd = nbd_read_stat(nbd, args->index);
@@ -636,7 +635,6 @@ static void recv_work(struct work_struct *work)
 			mutex_lock(&nsock->tx_lock);
 			nbd_mark_nsock_dead(nbd, nsock, 1);
 			mutex_unlock(&nsock->tx_lock);
-			ret = PTR_ERR(cmd);
 			break;
 		}
...
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
 	c.directive.opcode = nvme_admin_directive_recv;
 	c.directive.nsid = cpu_to_le32(nsid);
-	c.directive.numd = sizeof(*s);
+	c.directive.numd = cpu_to_le32(sizeof(*s));
 	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
 	c.directive.dtype = NVME_DIR_STREAMS;
...
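numd is a length field inside the command, and NVMe command fields are little-endian on the wire, so the raw sizeof assignment only worked by accident on little-endian CPUs. A user-space analogue of the fix, with htole32() from <endian.h> standing in for the kernel's cpu_to_le32() (an illustration, not the driver code):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t raw = 32;              /* e.g. size of a params struct */
	uint32_t wire = htole32(raw);   /* what the device must see     */
	unsigned char b[4];

	memcpy(b, &wire, sizeof(b));
	/* least-significant byte first, regardless of host endianness */
	printf("wire bytes: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;
}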
@@ -539,7 +539,7 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct dma_pool *pool;
@@ -556,7 +556,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 	length -= (page_size - offset);
 	if (length <= 0)
-		return true;
+		return BLK_STS_OK;
 
 	dma_len -= (page_size - offset);
 	if (dma_len) {
@@ -569,7 +569,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 	if (length <= page_size) {
 		iod->first_dma = dma_addr;
-		return true;
+		return BLK_STS_OK;
 	}
 
 	nprps = DIV_ROUND_UP(length, page_size);
@@ -585,7 +585,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 	if (!prp_list) {
 		iod->first_dma = dma_addr;
 		iod->npages = -1;
-		return false;
+		return BLK_STS_RESOURCE;
 	}
 	list[0] = prp_list;
 	iod->first_dma = prp_dma;
@@ -595,7 +595,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 			__le64 *old_prp_list = prp_list;
 			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 			if (!prp_list)
-				return false;
+				return BLK_STS_RESOURCE;
 			list[iod->npages++] = prp_list;
 			prp_list[0] = old_prp_list[i - 1];
 			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -609,13 +609,29 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 			break;
 		if (dma_len > 0)
 			continue;
-		BUG_ON(dma_len < 0);
+		if (unlikely(dma_len < 0))
+			goto bad_sgl;
 		sg = sg_next(sg);
 		dma_addr = sg_dma_address(sg);
 		dma_len = sg_dma_len(sg);
 	}
 
-	return true;
+	return BLK_STS_OK;
+
+ bad_sgl:
+	if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n",
+				blk_rq_payload_bytes(req), iod->nents)) {
+		for_each_sg(iod->sg, sg, iod->nents, i) {
+			dma_addr_t phys = sg_phys(sg);
+			pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
+				"dma_address:%pad dma_length:%d\n", i, &phys,
+					sg->offset, sg->length,
+					&sg_dma_address(sg),
+					sg_dma_len(sg));
+		}
+	}
+	return BLK_STS_IOERR;
 }
 
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -637,7 +653,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 			DMA_ATTR_NO_WARN))
 		goto out;
 
-	if (!nvme_setup_prps(dev, req))
+	ret = nvme_setup_prps(dev, req);
+	if (ret != BLK_STS_OK)
 		goto out_unmap;
 
 	ret = BLK_STS_IOERR;
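Returning blk_status_t instead of bool is what lets the caller above tell a retryable allocation failure (BLK_STS_RESOURCE) apart from a malformed request (BLK_STS_IOERR), which a bool cannot express. A standalone sketch of the pattern, with stand-in names rather than the kernel's definitions:

#include <stdio.h>

enum blk_status { STS_OK, STS_RESOURCE, STS_IOERR };

static enum blk_status setup_prps(int have_memory, int sane_sgl)
{
	if (!sane_sgl)
		return STS_IOERR;      /* bad scatterlist: fail the I/O */
	if (!have_memory)
		return STS_RESOURCE;   /* transient: caller may requeue */
	return STS_OK;
}

int main(void)
{
	enum blk_status s = setup_prps(0, 1);

	if (s == STS_RESOURCE)
		puts("requeue and retry later");
	else if (s == STS_IOERR)
		puts("complete the request with an error");
	return 0;
}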
@@ -2282,7 +2299,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	result = nvme_dev_map(dev);
 	if (result)
-		goto free;
+		goto put_pci;
 
 	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
 	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
@@ -2291,7 +2308,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	result = nvme_setup_prp_pools(dev);
 	if (result)
-		goto put_pci;
+		goto unmap;
 
 	quirks |= check_dell_samsung_bug(pdev);
@@ -2308,9 +2325,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
  release_pools:
 	nvme_release_prp_pools(dev);
+ unmap:
+	nvme_dev_unmap(dev);
  put_pci:
 	put_device(dev->dev);
-	nvme_dev_unmap(dev);
  free:
 	kfree(dev->queues);
 	kfree(dev);
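The relabelled error path restores the usual goto-unwind discipline: release resources in exactly the reverse order of acquisition, with each label undoing only the steps that had already succeeded. A compilable miniature of the idiom (function names are illustrative, not the driver's):

#include <stdio.h>

static int map_regs(void)    { puts("map");   return 0;  }
static int setup_pools(void) { puts("pools"); return -1; } /* fails */
static void unmap_regs(void) { puts("unmap"); }
static void put_ref(void)    { puts("put ref"); }

static int probe(void)
{
	int err;

	err = map_regs();       /* like nvme_dev_map()         */
	if (err)
		goto put;
	err = setup_pools();    /* like nvme_setup_prp_pools() */
	if (err)
		goto unmap;     /* undo the mapping first...   */
	return 0;
unmap:
	unmap_regs();
put:                            /* ...then drop the reference  */
	put_ref();
	return err;
}

int main(void) { return probe(); }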
@@ -2466,6 +2484,9 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0x0a54),
 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
 				NVME_QUIRK_DEALLOCATE_ZEROES, },
+	{ PCI_VDEVICE(INTEL, 0x0a55),
+		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+				NVME_QUIRK_DEALLOCATE_ZEROES, },
 	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS },
 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
...
@@ -168,11 +168,21 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
 	nvmet_req_complete(req, status);
 }
 
+static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
+{
+	int len = min(src_len, dst_len);
+
+	memcpy(dst, src, len);
+	if (dst_len > len)
+		memset(dst + len, ' ', dst_len - len);
+}
+
 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvme_id_ctrl *id;
 	u16 status = 0;
+	const char model[] = "Linux";
 
 	id = kzalloc(sizeof(*id), GFP_KERNEL);
 	if (!id) {
@@ -184,8 +194,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->vid = 0;
 	id->ssvid = 0;
 
-	memset(id->sn, ' ', sizeof(id->sn));
-	snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
+	bin2hex(id->sn, &ctrl->subsys->serial,
+		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
+	copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
+	copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
 
-	memset(id->mn, ' ', sizeof(id->mn));
-	strncpy((char *)id->mn, "Linux", sizeof(id->mn));
...
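The underlying bug: Identify strings such as sn, mn and fr are fixed-width, space-padded ASCII fields with no NUL terminator, but the old snprintf() wrote a terminating 0-byte into id->sn. bin2hex() now renders the 64-bit serial as hex digits and copy_and_pad() space-fills the rest of each field. A user-space re-implementation to show the intended layout (the kernel provides its own bin2hex and min helpers):

#include <stdio.h>
#include <string.h>

static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
{
	int len = src_len < dst_len ? src_len : dst_len;

	memcpy(dst, src, len);
	if (dst_len > len)
		memset(dst + len, ' ', dst_len - len);
}

int main(void)
{
	char mn[40];                 /* id->mn is 40 bytes in the spec */

	copy_and_pad(mn, sizeof(mn), "Linux", 5);
	printf("[%.40s]\n", mn);     /* "Linux" followed by 35 spaces, no NUL */
	return 0;
}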
@@ -650,7 +650,7 @@ static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
 
 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
 
-static ssize_t nvmet_subsys_version_show(struct config_item *item,
+static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
 					      char *page)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
@@ -666,7 +666,7 @@ static ssize_t nvmet_subsys_version_show(struct config_item *item,
 		       (int)NVME_MINOR(subsys->ver));
 }
 
-static ssize_t nvmet_subsys_version_store(struct config_item *item,
+static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
 					       const char *page, size_t count)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
@@ -684,11 +684,33 @@ static ssize_t nvmet_subsys_version_store(struct config_item *item,
 
 	return count;
 }
-CONFIGFS_ATTR(nvmet_subsys_, version);
+CONFIGFS_ATTR(nvmet_subsys_, attr_version);
+
+static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
+					     char *page)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+
+	return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
+}
+
+static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
+					      const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+
+	down_write(&nvmet_config_sem);
+	sscanf(page, "%llx\n", &subsys->serial);
+	up_write(&nvmet_config_sem);
+
+	return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
 
 static struct configfs_attribute *nvmet_subsys_attrs[] = {
 	&nvmet_subsys_attr_attr_allow_any_host,
-	&nvmet_subsys_attr_version,
+	&nvmet_subsys_attr_attr_version,
+	&nvmet_subsys_attr_attr_serial,
 	NULL,
 };
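With the new attr_serial file, the serial number can survive reboots because user space can re-write it after the subsystem is created. A hedged sketch of a writer, assuming the standard nvmet configfs mount point and a subsystem named "testnqn" (both are assumptions for illustration, not part of this patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/config/nvmet/subsystems/testnqn/attr_serial";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* matches the sscanf(page, "%llx\n", ...) in the store handler */
	if (write(fd, "deadbeef12345678\n", 17) < 0)
		perror("write");
	close(fd);
	return 0;
}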
...
@@ -767,9 +767,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
 
-	/* generate a random serial number as our controllers are ephemeral: */
-	get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
-
 	kref_init(&ctrl->ref);
 	ctrl->subsys = subsys;
@@ -928,6 +925,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 		return NULL;
 
 	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+	/* generate a random serial number as our controllers are ephemeral: */
+	get_random_bytes(&subsys->serial, sizeof(subsys->serial));
 
 	switch (type) {
 	case NVME_NQN_NVME:
...
@@ -1174,14 +1174,14 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 	 */
 	if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
 		ret = VERR_CR_ASSOC_LEN;
-	else if (rqst->desc_list_len <
-			cpu_to_be32(FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN))
+	else if (be32_to_cpu(rqst->desc_list_len) <
+			FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
 		ret = VERR_CR_ASSOC_RQST_LEN;
 	else if (rqst->assoc_cmd.desc_tag !=
 			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
 		ret = VERR_CR_ASSOC_CMD;
-	else if (rqst->assoc_cmd.desc_len <
-			cpu_to_be32(FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN))
+	else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
+			FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
 		ret = VERR_CR_ASSOC_CMD_LEN;
 	else if (!rqst->assoc_cmd.ersp_ratio ||
 		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
...
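The subtle point in this hunk: byte-swapping both sides preserves equality, which is why the desc_tag != cpu_to_be32(...) test was already correct, but it does not preserve ordering, so the < checks must convert to CPU byte order first. A standalone demonstration, with htobe32/be32toh as the user-space counterparts of the kernel helpers:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wire = htobe32(256);  /* field as received off the wire */
	uint32_t min  = 2;             /* required minimum               */

	/* Broken (old) style: on a little-endian CPU this compares the
	 * byte-reversed images 0x00010000 and 0x02000000, wrongly
	 * concluding that 256 < 2.
	 */
	printf("raw < htobe32(min):  %d\n", wire < htobe32(min));

	/* Fixed style: convert to CPU order, then compare. */
	printf("be32toh(raw) < min:  %d\n", be32toh(wire) < min);

	/* Equality is order-insensitive, so both forms agree here. */
	printf("tag checks agree:    %d\n",
	       (wire == htobe32(256)) == (be32toh(wire) == 256));
	return 0;
}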
@@ -112,7 +112,6 @@ struct nvmet_ctrl {
 	struct mutex		lock;
 	u64			cap;
-	u64			serial;
 	u32			cc;
 	u32			csts;
@@ -152,6 +151,7 @@ struct nvmet_subsys {
 	u16			max_qid;
 	u64			ver;
+	u64			serial;
 	char			*subsysnqn;
 	struct config_group	group;
...
@@ -963,14 +963,14 @@ struct nvme_dbbuf {
 };
 
 struct streams_directive_params {
-	__u16	msl;
-	__u16	nssa;
-	__u16	nsso;
+	__le16	msl;
+	__le16	nssa;
+	__le16	nsso;
 	__u8	rsvd[10];
-	__u32	sws;
-	__u16	sgs;
-	__u16	nsa;
-	__u16	nso;
+	__le32	sws;
+	__le16	sgs;
+	__le16	nsa;
+	__le16	nso;
 	__u8	rsvd2[6];
 };
...
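The type change is invisible to the compiler but meaningful to sparse: __le16/__le32 are bitwise-annotated, so any read that forgets le16_to_cpu()/le32_to_cpu() gets flagged. A user-space analogue of how a reader consumes such a structure (a trimmed-down stand-in, with le16toh playing the role of le16_to_cpu):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct streams_params {        /* stand-in for the wire-format struct */
	uint16_t msl;          /* little-endian on the wire */
	uint16_t nssa;
};

int main(void)
{
	struct streams_params p = { .msl = htole16(8), .nssa = htole16(4) };

	/* always convert on read; using p.msl directly is the bug class
	 * the __le16 annotation exists to catch */
	printf("max streams limit: %u\n", le16toh(p.msl));
	printf("streams available: %u\n", le16toh(p.nssa));
	return 0;
}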