Commit 4884f8bf authored by Jens Axboe

Merge branch 'nvme-4.19' of git://git.infradead.org/nvme into for-4.19/block

Pull NVMe updates from Christoph:

"This should be the last round of NVMe updates before the 4.19 merge
 window opens.  It contains support for write protected (aka read-only)
 namespaces from Chaitanya, two ANA fixes from Hannes and a fabrics
 fix from Tal Shorer."

* 'nvme-4.19' of git://git.infradead.org/nvme:
  nvme-fabrics: fix ctrl_loss_tmo < 0 to reconnect forever
  nvmet: add ns write protect support
  nvme: set gendisk read only based on nsattr
  nvme.h: add support for ns write protect definitions
  nvme.h: fixup ANA group descriptor format
  nvme: fixup crash on failed discovery
parents cbb751c0 66414e80
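
The write protect feature added on the target side below can be driven from a host through the admin passthrough ioctl. A minimal sketch, not part of the series, assuming a controller node at /dev/nvme0 and namespace 1 (both hypothetical); the feature ID and state values correspond to NVME_FEAT_WRITE_PROTECT and the NVME_NS_* write protect states defined in the nvme.h hunks below:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct nvme_admin_cmd cmd;
	int fd = open("/dev/nvme0", O_RDWR);	/* hypothetical admin node */

	if (fd < 0)
		return 1;

	/* Set Features: mark namespace 1 write protected. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x09;		/* Set Features */
	cmd.nsid = 1;
	cmd.cdw10 = 0x84;		/* NVME_FEAT_WRITE_PROTECT */
	cmd.cdw11 = 1;			/* NVME_NS_WRITE_PROTECT */
	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) != 0)
		perror("set-feature");

	/* Get Features: read the state back; the target returns it in the
	 * completion result dword via nvmet_set_result(). */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x0a;		/* Get Features */
	cmd.nsid = 1;
	cmd.cdw10 = 0x84;
	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) == 0)
		printf("write protect state: %u\n", cmd.result);
	return 0;
}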
drivers/nvme/host/core.c

@@ -1484,6 +1484,12 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	set_capacity(disk, capacity);
 	nvme_config_discard(ns);
+
+	if (id->nsattr & (1 << 0))
+		set_disk_ro(disk, true);
+	else
+		set_disk_ro(disk, false);
+
 	blk_mq_unfreeze_queue(disk->queue);
 }
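
Once the controller reports NSATTR bit 0 for a namespace, the gendisk now comes up read-only, which is observable from userspace. A quick check, assuming a hypothetical namespace node /dev/nvme0n1:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	int ro = 0;
	int fd = open("/dev/nvme0n1", O_RDONLY);	/* hypothetical namespace */

	if (fd < 0)
		return 1;
	/* BLKROGET reflects the set_disk_ro() state from the hunk above. */
	if (ioctl(fd, BLKROGET, &ro) == 0)
		printf("read-only: %d\n", ro);
	return 0;
}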
drivers/nvme/host/fabrics.c

@@ -474,7 +474,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
 {
-	if (ctrl->opts->max_reconnects != -1 &&
+	if (ctrl->opts->max_reconnects == -1 ||
 	    ctrl->nr_reconnects < ctrl->opts->max_reconnects)
 		return true;
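
For ctrl_loss_tmo < 0 the fabrics option parsing stores max_reconnects = -1, meaning "reconnect forever"; the old predicate treated -1 as "never reconnect". A standalone mirror of the fixed check (hypothetical helper, not part of the patch), just to make the semantics concrete:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the corrected nvmf_should_reconnect() logic: -1 is "no limit". */
static bool should_reconnect(int max_reconnects, int nr_reconnects)
{
	return max_reconnects == -1 || nr_reconnects < max_reconnects;
}

int main(void)
{
	printf("%d\n", should_reconnect(-1, 100));	/* 1: retry forever */
	printf("%d\n", should_reconnect(3, 3));		/* 0: limit reached */
	return 0;
}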
drivers/nvme/host/multipath.c

@@ -22,7 +22,7 @@ MODULE_PARM_DESC(multipath,
 inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
 {
-	return multipath && (ctrl->subsys->cmic & (1 << 3));
+	return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
 }

 /*
drivers/nvme/target/admin-cmd.c

@@ -372,6 +372,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->psd[0].entry_lat = cpu_to_le32(0x10);
 	id->psd[0].exit_lat = cpu_to_le32(0x4);

+	id->nwpc = 1 << 0; /* write protect and no write protect */
+
 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

 	kfree(id);
@@ -433,6 +435,8 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 	id->lbaf[0].ds = ns->blksize_shift;

+	if (ns->readonly)
+		id->nsattr |= (1 << 0);
 	nvmet_put_namespace(ns);
 done:
 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -545,6 +549,52 @@ static void nvmet_execute_abort(struct nvmet_req *req)
 	nvmet_req_complete(req, 0);
 }

+static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
+{
+	u16 status;
+
+	if (req->ns->file)
+		status = nvmet_file_flush(req);
+	else
+		status = nvmet_bdev_flush(req);
+
+	if (status)
+		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
+	return status;
+}
+
+static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
+{
+	u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
+	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
+
+	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
+	if (unlikely(!req->ns))
+		return status;
+
+	mutex_lock(&subsys->lock);
+	switch (write_protect) {
+	case NVME_NS_WRITE_PROTECT:
+		req->ns->readonly = true;
+		status = nvmet_write_protect_flush_sync(req);
+		if (status)
+			req->ns->readonly = false;
+		break;
+	case NVME_NS_NO_WRITE_PROTECT:
+		req->ns->readonly = false;
+		status = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (!status)
+		nvmet_ns_changed(subsys, req->ns->nsid);
+	mutex_unlock(&subsys->lock);
+	return status;
+}
+
 static void nvmet_execute_set_features(struct nvmet_req *req)
 {
 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
@@ -575,6 +625,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 	case NVME_FEAT_HOST_ID:
 		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
 		break;
+	case NVME_FEAT_WRITE_PROTECT:
+		status = nvmet_set_feat_write_protect(req);
+		break;
 	default:
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		break;
@@ -583,6 +636,26 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 	nvmet_req_complete(req, status);
 }

+static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
+{
+	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+	u32 result;
+
+	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
+	if (!req->ns)
+		return NVME_SC_INVALID_NS | NVME_SC_DNR;
+
+	mutex_lock(&subsys->lock);
+	if (req->ns->readonly == true)
+		result = NVME_NS_WRITE_PROTECT;
+	else
+		result = NVME_NS_NO_WRITE_PROTECT;
+	nvmet_set_result(req, result);
+	mutex_unlock(&subsys->lock);
+
+	return 0;
+}
+
 static void nvmet_execute_get_features(struct nvmet_req *req)
 {
 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
@@ -634,6 +707,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
 		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
 				sizeof(req->sq->ctrl->hostid));
 		break;
+	case NVME_FEAT_WRITE_PROTECT:
+		status = nvmet_get_feat_write_protect(req);
+		break;
 	default:
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		break;
drivers/nvme/target/core.c

@@ -180,7 +180,7 @@ static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
 	mutex_unlock(&ctrl->lock);
 }

-static void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
+void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
 {
 	struct nvmet_ctrl *ctrl;
@@ -609,6 +609,21 @@ static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
 	return 0;
 }

+static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
+{
+	if (unlikely(req->ns->readonly)) {
+		switch (req->cmd->common.opcode) {
+		case nvme_cmd_read:
+		case nvme_cmd_flush:
+			break;
+		default:
+			return NVME_SC_NS_WRITE_PROTECTED;
+		}
+	}
+
+	return 0;
+}
+
 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
 {
 	struct nvme_command *cmd = req->cmd;
@@ -622,6 +637,9 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
 	if (unlikely(!req->ns))
 		return NVME_SC_INVALID_NS | NVME_SC_DNR;

 	ret = nvmet_check_ana_state(req->port, req->ns);
 	if (unlikely(ret))
 		return ret;
+	ret = nvmet_io_cmd_check_access(req);
+	if (unlikely(ret))
+		return ret;
drivers/nvme/target/io-cmd-bdev.c

@@ -124,6 +124,13 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
 	submit_bio(bio);
 }

+u16 nvmet_bdev_flush(struct nvmet_req *req)
+{
+	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+		return NVME_SC_INTERNAL | NVME_SC_DNR;
+	return 0;
+}
+
 static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns,
 		struct nvme_dsm_range *range, struct bio **bio)
 {
drivers/nvme/target/io-cmd-file.c

@@ -211,14 +211,18 @@ static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
 	queue_work(buffered_io_wq, &req->f.work);
 }

+u16 nvmet_file_flush(struct nvmet_req *req)
+{
+	if (vfs_fsync(req->ns->file, 1) < 0)
+		return NVME_SC_INTERNAL | NVME_SC_DNR;
+	return 0;
+}
+
 static void nvmet_file_flush_work(struct work_struct *w)
 {
 	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
-	int ret;

-	ret = vfs_fsync(req->ns->file, 1);
-
-	nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+	nvmet_req_complete(req, nvmet_file_flush(req));
 }

 static void nvmet_file_execute_flush(struct nvmet_req *req)
drivers/nvme/target/nvmet.h

@@ -58,6 +58,7 @@ struct nvmet_ns {
 	struct percpu_ref	ref;
 	struct block_device	*bdev;
 	struct file		*file;
+	bool			readonly;
 	u32			nsid;
 	u32			blksize_shift;
 	loff_t			size;
@@ -429,6 +430,9 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
 int nvmet_file_ns_enable(struct nvmet_ns *ns);
 void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
 void nvmet_file_ns_disable(struct nvmet_ns *ns);
+u16 nvmet_bdev_flush(struct nvmet_req *req);
+u16 nvmet_file_flush(struct nvmet_req *req);
+void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);

 static inline u32 nvmet_rw_len(struct nvmet_req *req)
 {
include/linux/nvme.h

@@ -259,7 +259,7 @@ struct nvme_id_ctrl {
 	__le16			awun;
 	__le16			awupf;
 	__u8			nvscc;
-	__u8			rsvd531;
+	__u8			nwpc;
 	__le16			acwu;
 	__u8			rsvd534[2];
 	__le32			sgls;
@@ -320,7 +320,9 @@ struct nvme_id_ns {
 	__u8			nvmcap[16];
 	__u8			rsvd64[28];
 	__le32			anagrpid;
-	__u8			rsvd96[8];
+	__u8			rsvd96[3];
+	__u8			nsattr;
+	__u8			rsvd100[4];
 	__u8			nguid[16];
 	__u8			eui64[8];
 	struct nvme_lbaf	lbaf[16];
@@ -446,7 +448,7 @@ struct nvme_ana_group_desc {
 	__le32			nnsids;
 	__le64			chgcnt;
 	__u8			state;
-	__u8			rsvd17[7];
+	__u8			rsvd17[15];
 	__le32			nsids[];
 };
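
With the widened reserved field, the fixed-size part of the ANA group descriptor is 32 bytes, so the nsids array starts at byte 32. A standalone layout check (a mirror of the struct for illustration only, assuming the leading grpid member that sits above the shown context):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct ana_group_desc {
	uint32_t grpid;		/* assumed leading member, above the hunk context */
	uint32_t nnsids;
	uint64_t chgcnt;
	uint8_t  state;
	uint8_t  rsvd17[15];	/* 17 + 15 == 32: header ends here */
	uint32_t nsids[];
};

int main(void)
{
	/* With the old rsvd17[7] this offset would have been 24, and every
	 * descriptor after the first would be misparsed. */
	assert(offsetof(struct ana_group_desc, nsids) == 32);
	return 0;
}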
@@ -794,6 +796,7 @@ enum {
 	NVME_FEAT_HOST_ID	= 0x81,
 	NVME_FEAT_RESV_MASK	= 0x82,
 	NVME_FEAT_RESV_PERSIST	= 0x83,
+	NVME_FEAT_WRITE_PROTECT	= 0x84,
 	NVME_LOG_ERROR		= 0x01,
 	NVME_LOG_SMART		= 0x02,
 	NVME_LOG_FW_SLOT	= 0x03,
@@ -807,6 +810,14 @@ enum {
 	NVME_FWACT_ACTV		= (2 << 3),
 };

+/* NVMe Namespace Write Protect State */
+enum {
+	NVME_NS_NO_WRITE_PROTECT = 0,
+	NVME_NS_WRITE_PROTECT,
+	NVME_NS_WRITE_PROTECT_POWER_CYCLE,
+	NVME_NS_WRITE_PROTECT_PERMANENT,
+};
+
 #define NVME_MAX_CHANGED_NAMESPACES	1024

 struct nvme_identify {
@@ -1153,6 +1164,8 @@ enum {
 	NVME_SC_SGL_INVALID_OFFSET	= 0x16,
 	NVME_SC_SGL_INVALID_SUBTYPE	= 0x17,

+	NVME_SC_NS_WRITE_PROTECTED	= 0x20,
+
 	NVME_SC_LBA_RANGE		= 0x80,
 	NVME_SC_CAP_EXCEEDED		= 0x81,
 	NVME_SC_NS_NOT_READY		= 0x82,