Commit b0b6e2c9 authored by Linus Torvalds

Merge tag 'block-6.1-2022-11-11' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Christoph:
        - Quiet user passthrough command errors (Keith Busch)
        - Fix memory leak in nvmet_subsys_attr_model_store_locked
        - Fix a memory leak in nvmet-auth (Sagi Grimberg)

 - Fix a potential NULL pointer deref in bfq (Yu)

 - Allocate command/response buffers separately for DMA for sed-opal,
   rather than rely on embedded alignment (Serge)

* tag 'block-6.1-2022-11-11' of git://git.kernel.dk/linux:
  nvmet: fix a memory leak
  nvmet: fix memory leak in nvmet_subsys_attr_model_store_locked
  nvme: quiet user passthrough command errors
  block: sed-opal: kmalloc the cmd/resp buffers
  block, bfq: fix null pointer dereference in bfq_bio_bfqg()
parents 4e6b2b2e df24560d
@@ -610,6 +610,10 @@ struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
 	struct bfq_group *bfqg;
 
 	while (blkg) {
+		if (!blkg->online) {
+			blkg = blkg->parent;
+			continue;
+		}
 		bfqg = blkg_to_bfqg(blkg);
 		if (bfqg->online) {
 			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
...
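The hunk above is the entire bfq fix: an offline blkg may no longer carry valid bfq policy data, so blkg_to_bfqg() can hand back a NULL bfqg and the subsequent bfqg->online test dereferences it. Walking up to the parent until an online ancestor is found avoids that. Below is a minimal user-space sketch of the same "climb to the nearest online ancestor" pattern; the node type and the nearest_online() helper are illustrative inventions, not kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
    struct node *parent;
    bool online;
    const char *name;
};

/* Walk toward the root, skipping nodes that are being torn down. */
static struct node *nearest_online(struct node *n)
{
    while (n) {
        if (!n->online) {
            n = n->parent;  /* offline: its per-node data may be gone */
            continue;
        }
        return n;
    }
    return NULL;            /* no online ancestor: caller must fall back */
}

int main(void)
{
    struct node root = { .parent = NULL,  .online = true,  .name = "root" };
    struct node mid  = { .parent = &root, .online = false, .name = "mid"  };
    struct node leaf = { .parent = &mid,  .online = false, .name = "leaf" };

    struct node *n = nearest_online(&leaf);
    printf("%s\n", n ? n->name : "(none)");  /* prints "root" */
    return 0;
}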
@@ -87,8 +87,8 @@ struct opal_dev {
 	u64 lowest_lba;
 
 	size_t pos;
-	u8 cmd[IO_BUFFER_LENGTH];
-	u8 resp[IO_BUFFER_LENGTH];
+	u8 *cmd;
+	u8 *resp;
 
 	struct parsed_resp parsed;
 	size_t prev_d_len;
@@ -2175,6 +2175,8 @@ void free_opal_dev(struct opal_dev *dev)
 		return;
 
 	clean_opal_dev(dev);
+	kfree(dev->resp);
+	kfree(dev->cmd);
 	kfree(dev);
 }
 EXPORT_SYMBOL(free_opal_dev);
@@ -2187,6 +2189,18 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
 	if (!dev)
 		return NULL;
 
+	/*
+	 * Presumably DMA-able buffers must be cache-aligned. Kmalloc makes
+	 * sure the allocated buffer is DMA-safe in that regard.
+	 */
+	dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
+	if (!dev->cmd)
+		goto err_free_dev;
+
+	dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
+	if (!dev->resp)
+		goto err_free_cmd;
+
 	INIT_LIST_HEAD(&dev->unlk_lst);
 	mutex_init(&dev->dev_lock);
 	dev->flags = 0;
@@ -2194,11 +2208,21 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
 	dev->send_recv = send_recv;
 	if (check_opal_support(dev) != 0) {
 		pr_debug("Opal is not supported on this device\n");
-		kfree(dev);
-		return NULL;
+		goto err_free_resp;
 	}
 
 	return dev;
 
+err_free_resp:
+	kfree(dev->resp);
+err_free_cmd:
+	kfree(dev->cmd);
+err_free_dev:
+	kfree(dev);
+	return NULL;
 }
 EXPORT_SYMBOL(init_opal_dev);
...
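Taken together, the three sed-opal hunks replace buffers embedded in struct opal_dev with separately kmalloc'ed ones (kmalloc guarantees the alignment DMA needs, while fields inside a struct can share cache lines with their neighbors) and unwind partial allocations with the usual goto-label error path, freeing in reverse order of allocation. A standalone C sketch of that allocate/unwind shape, with plain malloc()/free() standing in for kmalloc()/kfree(); the type and function names here are illustrative.

#include <stdlib.h>

#define IO_BUFFER_LENGTH 2048

struct dev {
    char *cmd;
    char *resp;
};

/* Allocate a dev and its two buffers; on any failure, free exactly
 * what was already allocated, in reverse order, and return NULL. */
static struct dev *dev_create(void)
{
    struct dev *dev = calloc(1, sizeof(*dev));
    if (!dev)
        return NULL;

    dev->cmd = malloc(IO_BUFFER_LENGTH);
    if (!dev->cmd)
        goto err_free_dev;

    dev->resp = malloc(IO_BUFFER_LENGTH);
    if (!dev->resp)
        goto err_free_cmd;

    return dev;

err_free_cmd:
    free(dev->cmd);
err_free_dev:
    free(dev);
    return NULL;
}

static void dev_destroy(struct dev *dev)
{
    if (!dev)
        return;
    free(dev->resp);  /* members first ...                          */
    free(dev->cmd);
    free(dev);        /* ... then the container, as free_opal_dev() */
}

int main(void)
{
    struct dev *dev = dev_create();
    dev_destroy(dev);  /* safe even if dev_create() failed */
    return 0;
}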
@@ -675,6 +675,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
 	if (req->mq_hctx->type == HCTX_TYPE_POLL)
 		req->cmd_flags |= REQ_POLLED;
 	nvme_clear_nvme_request(req);
+	req->rq_flags |= RQF_QUIET;
 	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
 }
 EXPORT_SYMBOL_GPL(nvme_init_request);
@@ -1037,7 +1038,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		goto out;
 	}
 
-	req->rq_flags |= RQF_QUIET;
 	ret = nvme_execute_rq(req, at_head);
 	if (result && ret >= 0)
 		*result = nvme_req(req)->result;
@@ -1227,7 +1227,6 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	rq->timeout = ctrl->kato * HZ;
 	rq->end_io = nvme_keep_alive_end_io;
 	rq->end_io_data = ctrl;
-	rq->rq_flags |= RQF_QUIET;
 	blk_execute_rq_nowait(rq, false);
 }
...
@@ -1436,7 +1436,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
 	abort_req->end_io = abort_endio;
 	abort_req->end_io_data = NULL;
-	abort_req->rq_flags |= RQF_QUIET;
 	blk_execute_rq_nowait(abort_req, false);
 
 	/*
@@ -2490,7 +2489,6 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	req->end_io_data = nvmeq;
 
 	init_completion(&nvmeq->delete_done);
-	req->rq_flags |= RQF_QUIET;
 	blk_execute_rq_nowait(req, false);
 	return 0;
 }
...
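All five RQF_QUIET hunks move in one direction: rather than each submitter remembering to set the flag, nvme_init_request() now sets it once for every passthrough/internal command, and the four per-call-site assignments become dead weight to delete. A toy C sketch of that design choice, setting a default flag in one shared init helper so the completion side stays quiet; the flag value and function names are made up for illustration, not the kernel's.

#include <stdio.h>

#define RQF_QUIET (1u << 0)  /* illustrative bit, not the kernel value */

struct request {
    unsigned int rq_flags;
};

/* One init helper sets the default; call sites no longer repeat it. */
static void request_init(struct request *rq)
{
    rq->rq_flags = 0;
    rq->rq_flags |= RQF_QUIET;  /* default: don't log expected errors */
}

static void complete_request(const struct request *rq, int error)
{
    if (error && !(rq->rq_flags & RQF_QUIET))
        fprintf(stderr, "I/O error %d\n", error);
}

int main(void)
{
    struct request rq;
    request_init(&rq);
    complete_request(&rq, -5);  /* silent: the default suppresses the log */
    return 0;
}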
@@ -1215,6 +1215,7 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
 		const char *page, size_t count)
 {
 	int pos = 0, len;
+	char *val;
 
 	if (subsys->subsys_discovered) {
 		pr_err("Can't set model number. %s is already assigned\n",
@@ -1237,9 +1238,11 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
 		return -EINVAL;
 	}
 
-	subsys->model_number = kmemdup_nul(page, len, GFP_KERNEL);
-	if (!subsys->model_number)
+	val = kmemdup_nul(page, len, GFP_KERNEL);
+	if (!val)
 		return -ENOMEM;
 
+	kfree(subsys->model_number);
+	subsys->model_number = val;
 	return count;
 }
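The leak fixed above: writing the model number a second time overwrote subsys->model_number without freeing the previous allocation. The fix allocates the new copy first, then frees the old value, then assigns, so a failed allocation also leaves the old value intact. A user-space sketch of that replace-then-free pattern, with POSIX strndup() standing in for kmemdup_nul(); the struct and function names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct subsys {
    char *model_number;
};

/* Replace an owned heap string without leaking the old value.
 * Returns 0 on success, -1 if allocation fails (old value kept). */
static int set_model_number(struct subsys *s, const char *page, size_t len)
{
    char *val = strndup(page, len);  /* stand-in for kmemdup_nul() */
    if (!val)
        return -1;

    free(s->model_number);           /* this was leaked before the fix */
    s->model_number = val;
    return 0;
}

int main(void)
{
    struct subsys s = { .model_number = NULL };
    set_model_number(&s, "first", 5);
    set_model_number(&s, "second", 6);  /* old "first" is freed here */
    puts(s.model_number);
    free(s.model_number);
    return 0;
}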
@@ -1836,6 +1839,7 @@ static void nvmet_host_release(struct config_item *item)
 
 #ifdef CONFIG_NVME_TARGET_AUTH
 	kfree(host->dhchap_secret);
+	kfree(host->dhchap_ctrl_secret);
 #endif
 	kfree(host);
 }
...
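The nvmet-auth leak is the classic incomplete-destructor bug: the release path freed dhchap_secret but not its sibling dhchap_ctrl_secret. A minimal C sketch of a release function that frees every member the object owns before the object itself; the types and names are illustrative.

#include <stdlib.h>

struct host {
    char *dhchap_secret;
    char *dhchap_ctrl_secret;
};

/* Release must free every member the object owns; the fix adds the
 * second free that was missing for dhchap_ctrl_secret. */
static void host_release(struct host *h)
{
    free(h->dhchap_secret);
    free(h->dhchap_ctrl_secret);  /* previously leaked */
    free(h);
}

int main(void)
{
    struct host *h = calloc(1, sizeof(*h));
    if (!h)
        return 1;
    host_release(h);
    return 0;
}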