Commit e960f71a authored by Jens Axboe

Merge branch 'nvme-5.1' of git://git.infradead.org/nvme into for-5.1/block

Pull 5.1 NVMe material from Christoph:

"Below is our current (small) queue of NVMe patches for Linux 5.1. We
 want the re-addition of the Write Zeroes support to be in linux-next for
 a few weeks as it caused some problems last time. The only other patch
 is a cleanup from Sagi."

* 'nvme-5.1' of git://git.infradead.org/nvme:
  nvme: remove the .stop_ctrl callout
  nvme: add support for the Write Zeroes command
parents bb94aea1 794a4cb3
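
For context, a minimal caller-side sketch of how this path is exercised once a namespace advertises Write Zeroes: the block layer's existing blkdev_issue_zeroout() helper issues REQ_OP_WRITE_ZEROES bios, which nvme_setup_cmd() below now maps to nvme_cmd_write_zeroes. The helper and flag are existing kernel APIs; the wrapper function itself is illustrative only and not part of this commit.

#include <linux/blkdev.h>

/* Illustrative only: zero a 1 MiB range starting at LBA 0 of @bdev. */
static int example_zero_range(struct block_device *bdev)
{
	sector_t start = 0;			/* in 512-byte sectors */
	sector_t nr = (1024 * 1024) >> 9;	/* 1 MiB worth of sectors */

	/* NOFALLBACK: fail rather than fall back to writing zero pages. */
	return blkdev_issue_zeroout(bdev, start, nr, GFP_KERNEL,
				    BLKDEV_ZERO_NOFALLBACK);
}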
@@ -611,6 +611,22 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	return BLK_STS_OK;
 }
 
+static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
+		struct request *req, struct nvme_command *cmnd)
+{
+	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
+		return nvme_setup_discard(ns, req, cmnd);
+
+	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
+	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
+	cmnd->write_zeroes.slba =
+		cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+	cmnd->write_zeroes.length =
+		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+	cmnd->write_zeroes.control = 0;
+	return BLK_STS_OK;
+}
+
 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 		struct request *req, struct nvme_command *cmnd)
 {
@@ -705,7 +721,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		nvme_setup_flush(ns, cmd);
 		break;
 	case REQ_OP_WRITE_ZEROES:
-		/* currently only aliased to deallocate for a few ctrls: */
+		ret = nvme_setup_write_zeroes(ns, req, cmd);
+		break;
 	case REQ_OP_DISCARD:
 		ret = nvme_setup_discard(ns, req, cmd);
 		break;
@@ -1509,6 +1526,37 @@ static void nvme_config_discard(struct nvme_ns *ns)
 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
 }
 
+static inline void nvme_config_write_zeroes(struct nvme_ns *ns)
+{
+	u32 max_sectors;
+	unsigned short bs = 1 << ns->lba_shift;
+
+	if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES))
+		return;
+	/*
+	 * Even though the NVMe spec explicitly states that MDTS is not
+	 * applicable to Write Zeroes ("The restriction does not apply to
+	 * commands that do not transfer data between the host and the
+	 * controller (e.g., Write Uncorrectable or Write Zeroes command)."),
+	 * be more cautious and use the controller's max_hw_sectors value to
+	 * configure the maximum sectors for write-zeroes. max_hw_sectors is
+	 * derived from the controller's MDTS field in nvme_init_identify()
+	 * if available.
+	 */
+	if (ns->ctrl->max_hw_sectors == UINT_MAX)
+		max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9;
+	else
+		max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;
+
+	blk_queue_max_write_zeroes_sectors(ns->queue, max_sectors);
+}
+
+static inline void nvme_ns_config_oncs(struct nvme_ns *ns)
+{
+	nvme_config_discard(ns);
+	nvme_config_write_zeroes(ns);
+}
+
 static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
 		struct nvme_id_ns *id, struct nvme_ns_ids *ids)
 {
@@ -1562,7 +1610,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
 		capacity = 0;
 
 	set_capacity(disk, capacity);
-	nvme_config_discard(ns);
+	nvme_ns_config_oncs(ns);
 
 	if (id->nsattr & (1 << 0))
 		set_disk_ro(disk, true);
@@ -3591,8 +3639,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 	nvme_stop_keep_alive(ctrl);
 	flush_work(&ctrl->async_event_work);
 	cancel_work_sync(&ctrl->fw_act_work);
-	if (ctrl->ops->stop_ctrl)
-		ctrl->ops->stop_ctrl(ctrl);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
@@ -363,7 +363,6 @@ struct nvme_ctrl_ops {
 	void (*submit_async_event)(struct nvme_ctrl *ctrl);
 	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
-	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
 };
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
@@ -942,14 +942,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 	}
 }
 
-static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
-{
-	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-
-	cancel_work_sync(&ctrl->err_work);
-	cancel_delayed_work_sync(&ctrl->reconnect_work);
-}
-
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -1854,6 +1846,9 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
+	cancel_work_sync(&ctrl->err_work);
+	cancel_delayed_work_sync(&ctrl->reconnect_work);
+
 	nvme_rdma_teardown_io_queues(ctrl, shutdown);
 	if (shutdown)
 		nvme_shutdown_ctrl(&ctrl->ctrl);
@@ -1902,7 +1897,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 	.submit_async_event	= nvme_rdma_submit_async_event,
 	.delete_ctrl		= nvme_rdma_delete_ctrl,
 	.get_address		= nvmf_get_address,
-	.stop_ctrl		= nvme_rdma_stop_ctrl,
 };
 
 /*
@@ -1822,6 +1822,9 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
 {
+	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+
 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
 	if (shutdown)
 		nvme_shutdown_ctrl(ctrl);
@@ -1859,12 +1862,6 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
 	nvme_tcp_reconnect_or_remove(ctrl);
 }
 
-static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
-{
-	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
-	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
-}
-
 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
@@ -2115,7 +2112,6 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
 	.submit_async_event	= nvme_tcp_submit_async_event,
 	.delete_ctrl		= nvme_tcp_delete_ctrl,
 	.get_address		= nvmf_get_address,
-	.stop_ctrl		= nvme_tcp_stop_ctrl,
 };
 
 static bool
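
As an aside, the sizing logic in nvme_config_write_zeroes() above can be sanity-checked with a small standalone mirror of the arithmetic; this is illustrative userspace code using the same constants, not part of the commit:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the write-zeroes queue-limit arithmetic (illustrative only). */
static uint32_t wz_max_sectors(uint32_t max_hw_sectors, unsigned int lba_shift)
{
	uint32_t bs = 1u << lba_shift;		/* logical block size in bytes */

	if (max_hw_sectors == UINT32_MAX)	/* controller reported no MDTS limit */
		return ((uint32_t)(65535 + 1) * bs) >> 9;	/* USHRT_MAX + 1 blocks */
	return ((uint32_t)(max_hw_sectors + 1) * bs) >> 9;
}

int main(void)
{
	/*
	 * Example: 4 KiB logical blocks (lba_shift = 12) and no MDTS limit:
	 * (65536 * 4096) >> 9 = 524288 512-byte sectors, i.e. a 256 MiB cap.
	 */
	printf("%u\n", (unsigned int)wz_max_sectors(UINT32_MAX, 12));
	return 0;
}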