Commit 2b76da95 authored by Jens Axboe

Merge branch 'nvme-4.14' of git://git.infradead.org/nvme into for-4.14/block-postmerge

Pull NVMe changes from Christoph:

"Below is the current set of NVMe updates for Linux 4.14, now against
 your postmerge branch, and with three more patches.

 The biggest bit comes from Sagi and refactors the RDMA driver to
 prepare for more code sharing in the setup and teardown path.  But we
 have various features and bug fixes from a lot of people as well."
parents cd996fb4 1d5df6af
(collapsed diff not shown)
@@ -735,6 +735,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
goto out;
}
if (uuid_parse(p, &hostid)) {
pr_err("Invalid hostid %s\n", p);
ret = -EINVAL;
goto out;
}
......
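The only new line in the hunk above is the pr_err(), so a malformed hostid option now gets logged before the parser bails out. As a sketch of the pattern (parse_hostid_opt() is a hypothetical wrapper; uuid_parse() is the real helper from linux/uuid.h and returns non-zero on a malformed string):

#include <linux/uuid.h>
#include <linux/printk.h>

/* Hypothetical wrapper illustrating the fabrics option pattern. */
static int parse_hostid_opt(const char *p, uuid_t *hostid)
{
	if (uuid_parse(p, hostid)) {	/* non-zero: not a valid UUID string */
		pr_err("Invalid hostid %s\n", p);
		return -EINVAL;
	}
	return 0;			/* *hostid now holds the parsed UUID */
}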
@@ -220,6 +220,90 @@ static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
struct nvme_fc_queue *, unsigned int);
static void
nvme_fc_free_lport(struct kref *ref)
{
struct nvme_fc_lport *lport =
container_of(ref, struct nvme_fc_lport, ref);
unsigned long flags;
WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
WARN_ON(!list_empty(&lport->endp_list));
/* remove from transport list */
spin_lock_irqsave(&nvme_fc_lock, flags);
list_del(&lport->port_list);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
/* let the LLDD know we've finished tearing it down */
lport->ops->localport_delete(&lport->localport);
ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
ida_destroy(&lport->endp_cnt);
put_device(lport->dev);
kfree(lport);
}
static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
kref_put(&lport->ref, nvme_fc_free_lport);
}
static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
return kref_get_unless_zero(&lport->ref);
}
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo)
{
struct nvme_fc_lport *lport;
unsigned long flags;
spin_lock_irqsave(&nvme_fc_lock, flags);
list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
if (lport->localport.node_name != pinfo->node_name ||
lport->localport.port_name != pinfo->port_name)
continue;
if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
lport = ERR_PTR(-EEXIST);
goto out_done;
}
if (!nvme_fc_lport_get(lport)) {
/*
* fails if ref cnt already 0. If so,
* act as if lport already deleted
*/
lport = NULL;
goto out_done;
}
/* resume the lport */
lport->localport.port_role = pinfo->port_role;
lport->localport.port_id = pinfo->port_id;
lport->localport.port_state = FC_OBJSTATE_ONLINE;
spin_unlock_irqrestore(&nvme_fc_lock, flags);
return lport;
}
lport = NULL;
out_done:
spin_unlock_irqrestore(&nvme_fc_lock, flags);
return lport;
}
/**
* nvme_fc_register_localport - transport entry point called by an
@@ -257,6 +341,28 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
goto out_reghost_failed;
}
/*
* look to see if there is already a localport that had been
* deregistered and in the process of waiting for all the
* references to fully be removed. If the references haven't
* expired, we can simply re-enable the localport. Remoteports
* and controller reconnections should resume naturally.
*/
newrec = nvme_fc_attach_to_unreg_lport(pinfo);
/* found an lport, but something about its state is bad */
if (IS_ERR(newrec)) {
ret = PTR_ERR(newrec);
goto out_reghost_failed;
/* found existing lport, which was resumed */
} else if (newrec) {
*portptr = &newrec->localport;
return 0;
}
/* nothing found - allocate a new localport struct */
newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
GFP_KERNEL);
if (!newrec) {
@@ -310,44 +416,6 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
-static void
-nvme_fc_free_lport(struct kref *ref)
-{
-	struct nvme_fc_lport *lport =
-		container_of(ref, struct nvme_fc_lport, ref);
-	unsigned long flags;
-
-	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
-	WARN_ON(!list_empty(&lport->endp_list));
-
-	/* remove from transport list */
-	spin_lock_irqsave(&nvme_fc_lock, flags);
-	list_del(&lport->port_list);
-	spin_unlock_irqrestore(&nvme_fc_lock, flags);
-
-	/* let the LLDD know we've finished tearing it down */
-	lport->ops->localport_delete(&lport->localport);
-
-	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
-	ida_destroy(&lport->endp_cnt);
-
-	put_device(lport->dev);
-
-	kfree(lport);
-}
-
-static void
-nvme_fc_lport_put(struct nvme_fc_lport *lport)
-{
-	kref_put(&lport->ref, nvme_fc_free_lport);
-}
-
-static int
-nvme_fc_lport_get(struct nvme_fc_lport *lport)
-{
-	return kref_get_unless_zero(&lport->ref);
-}
/**
* nvme_fc_unregister_localport - transport entry point called by an
* LLDD to deregister/remove a previously
@@ -2731,6 +2799,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
if (ret)
goto out_free_queues;
ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.admin_q)) {
......
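The re-registration path above leans on kref_get_unless_zero(): a localport that was deregistered but whose references have not yet drained can be resurrected only if its refcount is still non-zero. A minimal sketch of the pattern in isolation (struct obj, obj_free() and obj_resume() are illustrative, not driver code):

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref ref;
};

static void obj_free(struct kref *ref)
{
	kfree(container_of(ref, struct obj, ref));
}

/* Illustrative: try to reuse an object found on a lookup list. */
static struct obj *obj_resume(struct obj *found)
{
	if (!kref_get_unless_zero(&found->ref))
		return NULL;	/* teardown already committed; treat as gone */
	return found;		/* caller later drops with kref_put(&found->ref, obj_free) */
}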
@@ -125,6 +125,7 @@ struct nvme_ctrl {
struct kref kref;
int instance;
struct blk_mq_tag_set *tagset;
struct blk_mq_tag_set *admin_tagset;
struct list_head namespaces;
struct mutex namespaces_mutex;
struct device *device; /* char device */
@@ -142,6 +143,7 @@ struct nvme_ctrl {
u16 cntlid;
u32 ctrl_config;
u16 mtfa;
u32 queue_count;
u64 cap;
@@ -160,6 +162,7 @@ struct nvme_ctrl {
u16 kas;
u8 npss;
u8 apsta;
unsigned int shutdown_timeout;
unsigned int kato;
bool subsystem;
unsigned long quirks;
@@ -167,6 +170,7 @@ struct nvme_ctrl {
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
struct work_struct fw_act_work;
/* Power saving configuration */
u64 ps_max_latency_us;
@@ -207,13 +211,9 @@ struct nvme_ns {
bool ext;
u8 pi_type;
unsigned long flags;
+	u16 noiob;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1
-	u64 mode_select_num_blocks;
-	u32 mode_select_block_len;
-	u16 noiob;
};
struct nvme_ctrl_ops {
......
@@ -555,8 +555,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
int nprps, i;
length -= (page_size - offset);
-	if (length <= 0)
+	if (length <= 0) {
+		iod->first_dma = 0;
		return BLK_STS_OK;
+	}
dma_len -= (page_size - offset);
if (dma_len) {
@@ -1376,6 +1378,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
if (blk_mq_alloc_tag_set(&dev->admin_tagset))
return -ENOMEM;
dev->ctrl.admin_tagset = &dev->admin_tagset;
dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
if (IS_ERR(dev->ctrl.admin_q)) {
......
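For context on the nvme_setup_prps() fix: the caller copies iod->first_dma into the command's PRP2 field unconditionally, so the early return taken when the transfer fits entirely in the first page must leave a defined value behind rather than stale memory. Roughly, simplified from nvme_map_data() in the same file:

	cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);	/* stale without the fix */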
(collapsed diff not shown)
@@ -100,7 +100,7 @@ static u16 nvmet_get_smart_log(struct nvmet_req *req,
u16 status;
WARN_ON(req == NULL || slog == NULL);
-	if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF))
+	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
status = nvmet_get_smart_log_all(req, slog);
else
status = nvmet_get_smart_log_nsid(req, slog);
@@ -168,15 +168,6 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
-static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
-{
-	int len = min(src_len, dst_len);
-
-	memcpy(dst, src, len);
-	if (dst_len > len)
-		memset(dst + len, ' ', dst_len - len);
-}
static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -196,8 +187,9 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
bin2hex(id->sn, &ctrl->subsys->serial,
min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
-	copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
-	copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
+	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+	memcpy_and_pad(id->fr, sizeof(id->fr),
+		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');
id->rab = 6;
......
@@ -444,7 +444,7 @@ static struct config_group *nvmet_ns_make(struct config_group *group,
goto out;
ret = -EINVAL;
-	if (nsid == 0 || nsid == 0xffffffff)
+	if (nsid == 0 || nsid == NVME_NSID_ALL)
goto out;
ret = -ENOMEM;
......
@@ -538,37 +538,37 @@ EXPORT_SYMBOL_GPL(nvmet_req_uninit);
static inline bool nvmet_cc_en(u32 cc)
{
-	return cc & 0x1;
+	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
-	return (cc >> 4) & 0x7;
+	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
-	return (cc >> 7) & 0xf;
+	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
-	return (cc >> 11) & 0x7;
+	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
-	return (cc >> 14) & 0x3;
+	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
-	return (cc >> 16) & 0xf;
+	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
-	return (cc >> 20) & 0xf;
+	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
@@ -749,6 +749,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
hostnqn, subsysnqn);
req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
goto out_put_subsystem;
}
up_read(&nvmet_config_sem);
......
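The nvmet_cc_*() change is purely cosmetic: open-coded shift counts become the named NVME_CC_*_SHIFT constants added to include/linux/nvme.h further down. A worked example of the decoding (per the NVMe spec, MPS is the host page-size exponent minus 12, and IOSQES/IOCQES are log2 of the queue entry sizes):

u32 cc = NVME_CC_ENABLE | NVME_CC_CSS_NVM |
	 (0 << NVME_CC_MPS_SHIFT) |	/* 2^(12+0) = 4 KiB host pages */
	 NVME_CC_IOSQES | NVME_CC_IOCQES;

bool en   = nvmet_cc_en(cc);		/* (cc >> 0) & 0x1 == 1 */
u8 mps    = nvmet_cc_mps(cc);		/* (cc >> 7) & 0xf == 0 -> 4 KiB */
u8 iosqes = nvmet_cc_iosqes(cc);	/* NVME_NVM_IOSQES == 6 -> 64-byte SQEs */
u8 iocqes = nvmet_cc_iocqes(cc);	/* NVME_NVM_IOCQES == 4 -> 16-byte CQEs */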
@@ -58,7 +58,8 @@ struct nvmet_fc_ls_iod {
struct work_struct work;
} __aligned(sizeof(unsigned long long));
-#define NVMET_FC_MAX_KB_PER_XFR		256
+#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
+#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
enum nvmet_fcp_datadir {
NVMET_FCP_NODATA,
@@ -74,9 +75,7 @@ struct nvmet_fc_fcp_iod {
struct nvme_fc_ersp_iu rspiubuf;
dma_addr_t rspdma;
struct scatterlist *data_sg;
-	struct scatterlist *next_sg;
	int data_sg_cnt;
-	u32 next_sg_offset;
u32 total_length;
u32 offset;
enum nvmet_fcp_datadir io_dir;
@@ -112,6 +111,7 @@ struct nvmet_fc_tgtport {
struct ida assoc_cnt;
struct nvmet_port *port;
struct kref ref;
u32 max_sg_cnt;
};
struct nvmet_fc_defer_fcp_req {
@@ -994,6 +994,8 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
INIT_LIST_HEAD(&newrec->assoc_list);
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
template->max_sgl_segments);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
@@ -1866,51 +1868,23 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod, u8 op)
{
struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
-	struct scatterlist *sg, *datasg;
	unsigned long flags;
-	u32 tlen, sg_off;
+	u32 tlen;
int ret;
fcpreq->op = op;
fcpreq->offset = fod->offset;
fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
-	tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
+	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
			(fod->total_length - fod->offset));
-	tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
-	tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
-				* PAGE_SIZE);
fcpreq->transfer_length = tlen;
fcpreq->transferred_length = 0;
fcpreq->fcp_error = 0;
fcpreq->rsplen = 0;
-	fcpreq->sg_cnt = 0;
-
-	datasg = fod->next_sg;
-	sg_off = fod->next_sg_offset;
-
-	for (sg = fcpreq->sg ; tlen; sg++) {
-		*sg = *datasg;
-		if (sg_off) {
-			sg->offset += sg_off;
-			sg->length -= sg_off;
-			sg->dma_address += sg_off;
-			sg_off = 0;
-		}
-		if (tlen < sg->length) {
-			sg->length = tlen;
-			fod->next_sg = datasg;
-			fod->next_sg_offset += tlen;
-		} else if (tlen == sg->length) {
-			fod->next_sg_offset = 0;
-			fod->next_sg = sg_next(datasg);
-		} else {
-			fod->next_sg_offset = 0;
-			datasg = sg_next(datasg);
-		}
-		tlen -= sg->length;
-		fcpreq->sg_cnt++;
-	}
+	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
+	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
/*
* If the last READDATA request: check if LLDD supports
@@ -2225,8 +2199,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.sg = fod->data_sg;
fod->req.sg_cnt = fod->data_sg_cnt;
fod->offset = 0;
-	fod->next_sg = fod->data_sg;
-	fod->next_sg_offset = 0;
if (fod->io_dir == NVMET_FCP_WRITE) {
/* pull the data over before invoking nvmet layer */
......
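The rewrite works because nvmet-fc builds its data scatterlists page by page: every entry except possibly the last maps exactly PAGE_SIZE bytes, and every transfer except the final one is a whole number of pages, so fod->offset is page-aligned whenever a new transfer starts. The LLDD can therefore be pointed straight into the existing list instead of receiving a spliced copy. The index math, with 4 KiB pages as a worked example:

/* A transfer starting at fod->offset = 256 KiB begins at
 * data_sg[262144 / 4096] = data_sg[64]; a 10 KiB tail transfer
 * spans DIV_ROUND_UP(10240, 4096) = 3 entries, the last one partial.
 */
fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);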
@@ -193,9 +193,6 @@ fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
#define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
#define ALL_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \
NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
-static DEFINE_SPINLOCK(fcloop_lock);
-static LIST_HEAD(fcloop_lports);
......
@@ -375,6 +375,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
if (error)
goto out_free_sq;
ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.admin_q)) {
......
@@ -624,7 +624,7 @@ struct nvmefc_tgt_fcp_req {
u32 timeout;
u32 transfer_length;
struct fc_ba_rjt ba_rjt;
-	struct scatterlist sg[NVME_FC_MAX_SEGMENTS];
+	struct scatterlist *sg;
int sg_cnt;
void *rspaddr;
dma_addr_t rspdma;
......
@@ -32,6 +32,8 @@
#define NVME_RDMA_IP_PORT 4420
#define NVME_NSID_ALL 0xffffffff
enum nvme_subsys_type {
NVME_NQN_DISC = 1, /* Discovery type target subsystem */
NVME_NQN_NVME = 2, /* NVME type target subsystem */
@@ -133,19 +135,26 @@ enum {
enum {
NVME_CC_ENABLE = 1 << 0,
NVME_CC_CSS_NVM = 0 << 4,
+	NVME_CC_EN_SHIFT	= 0,
+	NVME_CC_CSS_SHIFT	= 4,
+	NVME_CC_MPS_SHIFT	= 7,
-	NVME_CC_ARB_RR		= 0 << 11,
-	NVME_CC_ARB_WRRU	= 1 << 11,
-	NVME_CC_ARB_VS		= 7 << 11,
-	NVME_CC_SHN_NONE	= 0 << 14,
-	NVME_CC_SHN_NORMAL	= 1 << 14,
-	NVME_CC_SHN_ABRUPT	= 2 << 14,
-	NVME_CC_SHN_MASK	= 3 << 14,
-	NVME_CC_IOSQES		= NVME_NVM_IOSQES << 16,
-	NVME_CC_IOCQES		= NVME_NVM_IOCQES << 20,
+	NVME_CC_AMS_SHIFT	= 11,
+	NVME_CC_SHN_SHIFT	= 14,
+	NVME_CC_IOSQES_SHIFT	= 16,
+	NVME_CC_IOCQES_SHIFT	= 20,
+	NVME_CC_AMS_RR		= 0 << NVME_CC_AMS_SHIFT,
+	NVME_CC_AMS_WRRU	= 1 << NVME_CC_AMS_SHIFT,
+	NVME_CC_AMS_VS		= 7 << NVME_CC_AMS_SHIFT,
+	NVME_CC_SHN_NONE	= 0 << NVME_CC_SHN_SHIFT,
+	NVME_CC_SHN_NORMAL	= 1 << NVME_CC_SHN_SHIFT,
+	NVME_CC_SHN_ABRUPT	= 2 << NVME_CC_SHN_SHIFT,
+	NVME_CC_SHN_MASK	= 3 << NVME_CC_SHN_SHIFT,
+	NVME_CC_IOSQES		= NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
+	NVME_CC_IOCQES		= NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
NVME_CSTS_NSSRO = 1 << 4,
NVME_CSTS_PP = 1 << 5,
NVME_CSTS_SHST_NORMAL = 0 << 2,
NVME_CSTS_SHST_OCCUR = 1 << 2,
NVME_CSTS_SHST_CMPLT = 2 << 2,
@@ -251,6 +260,7 @@ enum {
NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
NVME_CTRL_ONCS_DSM = 1 << 2,
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
@@ -376,6 +386,13 @@ struct nvme_smart_log {
__u8 rsvd216[296];
};
struct nvme_fw_slot_info_log {
__u8 afi;
__u8 rsvd1[7];
__le64 frs[7];
__u8 rsvd64[448];
};
enum {
NVME_SMART_CRIT_SPARE = 1 << 0,
NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
@@ -386,6 +403,7 @@ enum {
enum {
NVME_AER_NOTICE_NS_CHANGED = 0x0002,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x0102,
};
struct nvme_lba_range_type {
@@ -677,6 +695,7 @@ enum {
NVME_FEAT_ASYNC_EVENT = 0x0b,
NVME_FEAT_AUTO_PST = 0x0c,
NVME_FEAT_HOST_MEM_BUF = 0x0d,
NVME_FEAT_TIMESTAMP = 0x0e,
NVME_FEAT_KATO = 0x0f,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
......
@@ -200,6 +200,7 @@ static inline const char *kbasename(const char *path)
void fortify_panic(const char *name) __noreturn __cold;
void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter");
void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter");
void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
@@ -395,4 +396,33 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q)
#endif
/**
* memcpy_and_pad - Copy one buffer to another with padding
* @dest: Where to copy to
* @dest_len: The destination buffer size
* @src: Where to copy from
* @count: The number of bytes to copy
* @pad: Character to use for padding if space is left in destination.
*/
__FORTIFY_INLINE void memcpy_and_pad(void *dest, size_t dest_len,
const void *src, size_t count, int pad)
{
size_t dest_size = __builtin_object_size(dest, 0);
size_t src_size = __builtin_object_size(src, 0);
if (__builtin_constant_p(dest_len) && __builtin_constant_p(count)) {
if (dest_size < dest_len && dest_size < count)
__write_overflow();
else if (src_size < dest_len && src_size < count)
__read_overflow3();
}
if (dest_size < dest_len)
fortify_panic(__func__);
if (dest_len > count) {
memcpy(dest, src, count);
memset(dest + count, pad, dest_len - count);
} else
memcpy(dest, src, dest_len);
}
#endif /* _LINUX_STRING_H_ */
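A usage sketch for the new helper, mirroring the nvmet identify-controller call sites above (id->fr in struct nvme_id_ctrl is an 8-byte, space-padded, non-NUL-terminated field):

char fr[8];

/* Short source: 6 bytes copied, the remaining 2 padded with ' '. */
memcpy_and_pad(fr, sizeof(fr), "4.14.0", strlen("4.14.0"), ' ');
/* fr == { '4', '.', '1', '4', '.', '0', ' ', ' ' } */

/* Source longer than dest: exactly sizeof(fr) bytes copied, no padding. */
memcpy_and_pad(fr, sizeof(fr), "much-too-long-string", 20, ' ');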