Commit 4fd84bc9 authored by Linus Torvalds

Merge tag 'block-5.10-2020-11-20' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe pull request from Christoph:
      - Doorbell Buffer freeing fix (Minwoo Im)
      - CSE log leak fix (Keith Busch)

 - blk-cgroup hd_struct leak fix (Christoph)

 - Flush request state fix (Ming)

 - dasd NULL deref fix (Stefan)

* tag 'block-5.10-2020-11-20' of git://git.kernel.dk/linux-block:
  s390/dasd: fix null pointer dereference for ERP requests
  blk-cgroup: fix a hd_struct leak in blkcg_fill_root_iostats
  nvme: fix memory leak freeing command effects
  nvme: directly cache command effects log
  nvme: free sq/cq dbbuf pointers when dbbuf set fails
  block: mark flush request as IDLE when it is really finished
parents fa5fca78 45f703a0
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -849,6 +849,7 @@ static void blkcg_fill_root_iostats(void)
 			u64_stats_update_begin(&blkg->iostat.sync);
 			blkg_iostat_set(&blkg->iostat.cur, &tmp);
 			u64_stats_update_end(&blkg->iostat.sync);
 		}
+		disk_put_part(part);
 	}
 }
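The leak fixed above is a get/put imbalance: disk_get_part() takes a reference on the hd_struct on every loop iteration and nothing released it. A minimal userspace sketch of the same discipline, with a hypothetical refcounted object standing in for the kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a refcounted kernel object. */
struct part {
	int refcount;
	long stats;
};

static struct part *part_get(struct part *p)
{
	p->refcount++;			/* like disk_get_part(): take a reference */
	return p;
}

static void part_put(struct part *p)
{
	if (--p->refcount == 0)		/* the last reference frees the object */
		free(p);
}

int main(void)
{
	struct part *p = calloc(1, sizeof(*p));
	p->refcount = 1;

	for (int i = 0; i < 3; i++) {
		struct part *ref = part_get(p);
		ref->stats += i;	/* use the object while holding the ref */
		part_put(ref);		/* the fix: a put for every get, every iteration */
	}

	printf("stats=%ld, refcount=%d\n", p->stats, p->refcount);
	part_put(p);			/* drop the initial reference */
	return 0;
}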
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -225,13 +225,18 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	/* release the tag's ownership to the req cloned from */
 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
-	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
 	if (!refcount_dec_and_test(&flush_rq->ref)) {
 		fq->rq_status = error;
 		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 		return;
 	}
 
+	/*
+	 * Flush request has to be marked as IDLE when it is really ended
+	 * because its .end_io() is called from timeout code path too for
+	 * avoiding use-after-free.
+	 */
+	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
 	if (fq->rq_status != BLK_STS_OK)
 		error = fq->rq_status;
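flush_end_io() is reached from both the normal completion path and the timeout path, so marking the request MQ_RQ_IDLE before the refcount check changed its state while another path could still hold a reference, opening a use-after-free window. The fix lets only whoever drops the last reference perform the transition. A minimal userspace sketch of that "only the final reference acts" pattern, using C11 atomics and hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of the kernel's refcount_dec_and_test(). */
static bool refcount_dec_and_test(atomic_int *ref)
{
	return atomic_fetch_sub(ref, 1) == 1;	/* true only for the last ref */
}

enum rq_state { MQ_RQ_IN_FLIGHT, MQ_RQ_IDLE };

struct flush_rq {
	atomic_int ref;
	enum rq_state state;
};

static void flush_end_io(struct flush_rq *rq)
{
	if (!refcount_dec_and_test(&rq->ref))
		return;			/* another path still owns a reference */

	/* Only now is it safe to mark the request reusable. */
	rq->state = MQ_RQ_IDLE;
}

int main(void)
{
	struct flush_rq rq = { .ref = 2, .state = MQ_RQ_IN_FLIGHT };

	flush_end_io(&rq);	/* e.g. timeout path: ref 2 -> 1, stays in flight */
	printf("state=%d\n", rq.state);
	flush_end_io(&rq);	/* completion path: ref 1 -> 0, now idle */
	printf("state=%d\n", rq.state);
	return 0;
}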
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2929,7 +2929,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
 static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
 				struct nvme_effects_log **log)
 {
-	struct nvme_cel *cel = xa_load(&ctrl->cels, csi);
+	struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi);
 	int ret;
 
 	if (cel)
@@ -2940,16 +2940,15 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
 		return -ENOMEM;
 
 	ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi,
-			&cel->log, sizeof(cel->log), 0);
+			cel, sizeof(*cel), 0);
 	if (ret) {
 		kfree(cel);
 		return ret;
 	}
 
-	cel->csi = csi;
-	xa_store(&ctrl->cels, cel->csi, cel, GFP_KERNEL);
+	xa_store(&ctrl->cels, csi, cel, GFP_KERNEL);
 out:
-	*log = &cel->log;
+	*log = cel;
 	return 0;
 }
@@ -4374,6 +4373,19 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
 
+static void nvme_free_cels(struct nvme_ctrl *ctrl)
+{
+	struct nvme_effects_log *cel;
+	unsigned long i;
+
+	xa_for_each(&ctrl->cels, i, cel) {
+		xa_erase(&ctrl->cels, i);
+		kfree(cel);
+	}
+
+	xa_destroy(&ctrl->cels);
+}
+
 static void nvme_free_ctrl(struct device *dev)
 {
 	struct nvme_ctrl *ctrl =
@@ -4383,8 +4395,7 @@ static void nvme_free_ctrl(struct device *dev)
 	if (!subsys || ctrl->instance != subsys->instance)
 		ida_simple_remove(&nvme_instance_ida, ctrl->instance);
 
-	xa_destroy(&ctrl->cels);
-
+	nvme_free_cels(ctrl);
 	nvme_mpath_uninit(ctrl);
 	__free_page(ctrl->discard_page);
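Two things change in core.c: the cels xarray now caches struct nvme_effects_log directly instead of the struct nvme_cel wrapper, and teardown gains nvme_free_cels(), because xa_destroy() only releases the xarray's internal nodes — the stored entries must be freed one by one or they leak. A minimal userspace sketch of the lookup/populate/teardown pattern, with a plain array standing in for the xarray and all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

#define MAX_CSI 256

struct effects_log { char data[32]; };

static struct effects_log *cels[MAX_CSI];	/* stand-in for the xarray */

/* Mirror of nvme_get_effects_log(): return the cached log, fetching on miss. */
static struct effects_log *get_effects_log(unsigned int csi)
{
	struct effects_log *cel = cels[csi];	/* xa_load() */

	if (cel)
		return cel;

	cel = calloc(1, sizeof(*cel));
	if (!cel)
		return NULL;
	snprintf(cel->data, sizeof(cel->data), "log for csi %u", csi);

	cels[csi] = cel;			/* xa_store() */
	return cel;
}

/* Mirror of nvme_free_cels(): entries must be freed individually. */
static void free_cels(void)
{
	for (unsigned int i = 0; i < MAX_CSI; i++) {	/* xa_for_each() */
		free(cels[i]);
		cels[i] = NULL;				/* xa_erase() */
	}
}

int main(void)
{
	struct effects_log *a = get_effects_log(0);
	struct effects_log *b = get_effects_log(0);

	printf("%s, cached=%d\n", a->data, a == b);
	free_cels();
	return 0;
}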
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -226,12 +226,6 @@ struct nvme_fault_inject {
 #endif
 };
 
-struct nvme_cel {
-	struct list_head entry;
-	struct nvme_effects_log log;
-	u8 csi;
-};
-
 struct nvme_ctrl {
 	bool comp_seen;
 	enum nvme_ctrl_state state;
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -292,9 +292,21 @@ static void nvme_dbbuf_init(struct nvme_dev *dev,
 	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
 }
 
+static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
+{
+	if (!nvmeq->qid)
+		return;
+
+	nvmeq->dbbuf_sq_db = NULL;
+	nvmeq->dbbuf_cq_db = NULL;
+	nvmeq->dbbuf_sq_ei = NULL;
+	nvmeq->dbbuf_cq_ei = NULL;
+}
+
 static void nvme_dbbuf_set(struct nvme_dev *dev)
 {
 	struct nvme_command c;
+	unsigned int i;
 
 	if (!dev->dbbuf_dbs)
 		return;
@@ -308,6 +320,9 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
 		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
 		/* Free memory and continue on */
 		nvme_dbbuf_dma_free(dev);
+
+		for (i = 1; i <= dev->online_queues; i++)
+			nvme_dbbuf_free(&dev->queues[i]);
 	}
 }
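When the Doorbell Buffer Config command fails, nvme_dbbuf_dma_free() releases the DMA memory, but each I/O queue still holds pointers into it, so a later completion could write through dangling pointers. The fix clears the per-queue pointers (the admin queue, qid 0, never has them set). A minimal userspace sketch of the "free the backing store, then clear every cached pointer" discipline, names hypothetical:

#include <stdio.h>
#include <stdlib.h>

#define NR_QUEUES 4

struct queue {
	int qid;
	int *dbbuf_sq_db;	/* points into the shared doorbell buffer */
};

static int *dbbuf;		/* shared backing store */
static struct queue queues[NR_QUEUES];

static void dbbuf_free(struct queue *q)
{
	if (!q->qid)
		return;		/* admin queue never uses the dbbuf */
	q->dbbuf_sq_db = NULL;
}

int main(void)
{
	dbbuf = calloc(NR_QUEUES, sizeof(*dbbuf));
	for (int i = 0; i < NR_QUEUES; i++) {
		queues[i].qid = i;
		if (i)
			queues[i].dbbuf_sq_db = &dbbuf[i];
	}

	/* "unable to set dbbuf": free the memory and continue on... */
	free(dbbuf);
	dbbuf = NULL;

	/* ...but also drop every cached pointer, as the fix does. */
	for (int i = 1; i < NR_QUEUES; i++)
		dbbuf_free(&queues[i]);

	for (int i = 0; i < NR_QUEUES; i++)
		printf("qid %d dbbuf %p\n", i, (void *)queues[i].dbbuf_sq_db);
	return 0;
}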
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2980,6 +2980,12 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
 	if (!block)
 		return -EINVAL;
 
+	/*
+	 * If the request is an ERP request there is nothing to requeue.
+	 * This will be done with the remaining original request.
+	 */
+	if (cqr->refers)
+		return 0;
+
 	spin_lock_irq(&cqr->dq->lock);
 	req = (struct request *) cqr->callback_data;
 	blk_mq_requeue_request(req, false);
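An ERP (error recovery) request carries no block-layer request of its own: its refers field points back at the original request it retries, and the per-request state used below (dq, callback_data) is only set up on that original, so requeueing an ERP request dereferenced a NULL pointer. A minimal sketch of the guard under those assumptions, with hypothetical userspace types:

#include <stdio.h>
#include <stddef.h>

struct ccw_req {
	struct ccw_req *refers;	/* set on ERP requests: the request being retried */
	void *callback_data;	/* block-layer request; NULL on ERP requests */
};

static int requeue_request(struct ccw_req *cqr)
{
	/* The fix: an ERP request has nothing of its own to requeue. */
	if (cqr->refers)
		return 0;

	printf("requeueing request %p\n", cqr->callback_data);
	return 1;
}

int main(void)
{
	int dummy;
	struct ccw_req orig = { .refers = NULL, .callback_data = &dummy };
	struct ccw_req erp  = { .refers = &orig, .callback_data = NULL };

	requeue_request(&erp);	/* skipped: handled via the original request */
	requeue_request(&orig);	/* the original request is requeued */
	return 0;
}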