Commit bd313968 authored by Linus Torvalds

Merge tag 'block-5.13-2021-05-07' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - dasd spelling fixes (Bhaskar)

 - Limit bio max size on multi-page bvecs to the hardware limit, to
   avoid overly large bios (and hence latencies). Originally queued for
   the merge window, but needed a fix and was dropped from the initial
   pull (Changheun)

 - NVMe pull request (Christoph):
      - reset the bdev to ns head when failover (Daniel Wagner)
      - remove unsupported command noise (Keith Busch)
      - misc passthrough improvements (Kanchan Joshi)
      - fix controller ioctl through ns_head (Minwoo Im)
      - fix controller timeouts during reset (Tao Chiu)

 - rnbd fixes/cleanups (Gioh, Md, Dima)

 - Fix iov_iter re-expansion (yangerkun)

* tag 'block-5.13-2021-05-07' of git://git.kernel.dk/linux-block:
  block: reexpand iov_iter after read/write
  nvmet: remove unsupported command noise
  nvme-multipath: reset bdev to ns head when failover
  nvme-pci: fix controller reset hang when racing with nvme_timeout
  nvme: move the fabrics queue ready check routines to core
  nvme: avoid memset for passthrough requests
  nvme: add nvme_get_ns helper
  nvme: fix controller ioctl through ns_head
  bio: limit bio max size
  RDMA/rtrs: fix uninitialized symbol 'cnt'
  s390: dasd: Mundane spelling fixes
  block/rnbd: Remove all likely and unlikely
  block/rnbd-clt: Check the return value of the function rtrs_clt_query
  block/rnbd: Fix style issues
  block/rnbd-clt: Change queue_depth type in rnbd_clt_session to size_t
parents 28b4afeb cf7b39a0
@@ -255,6 +255,13 @@ void bio_init(struct bio *bio, struct bio_vec *table,
}
EXPORT_SYMBOL(bio_init);
unsigned int bio_max_size(struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
return bdev ? bdev->bd_disk->queue->limits.bio_max_bytes : UINT_MAX;
}
/**
* bio_reset - reinitialize a bio
* @bio: bio to reset
@@ -866,7 +873,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
if (page_is_mergeable(bv, page, len, off, same_page)) {
if (bio->bi_iter.bi_size > UINT_MAX - len) {
if (bio->bi_iter.bi_size > bio_max_size(bio) - len) {
*same_page = false;
return false;
}
@@ -995,6 +1002,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
unsigned int bytes_left = bio_max_size(bio) - bio->bi_iter.bi_size;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
bool same_page = false;
@@ -1010,7 +1018,8 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
size = iov_iter_get_pages(iter, pages, bytes_left, nr_pages,
&offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
@@ -32,6 +32,7 @@ EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
*/
void blk_set_default_limits(struct queue_limits *lim)
{
lim->bio_max_bytes = UINT_MAX;
lim->max_segments = BLK_MAX_SEGMENTS;
lim->max_discard_segments = 1;
lim->max_integrity_segments = 0;
@@ -140,6 +141,10 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
limits->logical_block_size >> SECTOR_SHIFT);
limits->max_sectors = max_sectors;
if (check_shl_overflow(max_sectors, SECTOR_SHIFT,
&limits->bio_max_bytes))
limits->bio_max_bytes = UINT_MAX;
q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
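Taken together, the bio.c, blk-settings.c, and bio.h hunks cap a growing bio at the queue's hardware limit rather than at UINT_MAX. A minimal userspace sketch of the arithmetic, not kernel code; check_shl_overflow() and bio_full() are only modeled, and the function names are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

static unsigned int bio_max_bytes_for(unsigned int max_hw_sectors)
{
	uint64_t bytes = (uint64_t)max_hw_sectors << 9; /* SECTOR_SHIFT */

	/* mirrors the check_shl_overflow() fallback to UINT_MAX */
	return bytes > UINT32_MAX ? UINT32_MAX : (unsigned int)bytes;
}

static int bio_would_be_full(unsigned int bi_size, unsigned int len,
			     unsigned int max_bytes)
{
	return bi_size > max_bytes - len; /* same form as bio_full() */
}

int main(void)
{
	unsigned int limit = bio_max_bytes_for(2048); /* 2048 sectors = 1 MiB */

	printf("bio_max_bytes = %u\n", limit);
	printf("full at limit-4k plus 8k? %d\n",
	       bio_would_be_full(limit - 4096, 8192, limit));
	return 0;
}
```

The effect is that a multi-page bvec bio stops growing at what the hardware accepts in one request instead of at 4 GiB, which is where the latency complaint in the pull description comes from.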
@@ -88,7 +88,7 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
dev->secure_discard = le16_to_cpu(rsp->secure_discard);
dev->rotational = rsp->rotational;
dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
dev->fua = !!(rsp->cache_policy & RNBD_FUA);
dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
@@ -241,7 +241,7 @@ static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess)
cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags))
continue;
if (unlikely(!test_bit(cpu_q->cpu, sess->cpu_queues_bm)))
if (!test_bit(cpu_q->cpu, sess->cpu_queues_bm))
goto unlock;
q = list_first_entry_or_null(&cpu_q->requeue_list,
typeof(*q), requeue_list);
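This and the following rnbd hunks drop likely()/unlikely() from paths where the hint was never measured to help; the annotations only pay off when the prediction is correct and the branch is hot. For reference, a standalone sketch of what the macros expand to (these definitions match the kernel's compiler.h):

```c
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

int main(void)
{
	int err = 0;

	if (unlikely(err))	/* hint: branch rarely taken */
		puts("error path");
	else
		puts("fast path");
	return 0;
}
```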
@@ -320,7 +320,7 @@ static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess,
struct rtrs_permit *permit;
permit = rtrs_clt_get_permit(sess->rtrs, con_type, wait);
if (likely(permit))
if (permit)
/* We have a subtle rare case here, when all permits can be
* consumed before busy counter increased. This is safe,
* because loser will get NULL as a permit, observe 0 busy
@@ -351,12 +351,11 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
struct rtrs_permit *permit;
iu = kzalloc(sizeof(*iu), GFP_KERNEL);
if (!iu) {
if (!iu)
return NULL;
}
permit = rnbd_get_permit(sess, con_type, wait);
if (unlikely(!permit)) {
if (!permit) {
kfree(iu);
return NULL;
}
@@ -692,7 +691,11 @@ static void remap_devs(struct rnbd_clt_session *sess)
return;
}
rtrs_clt_query(sess->rtrs, &attrs);
err = rtrs_clt_query(sess->rtrs, &attrs);
if (err) {
pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
return;
}
mutex_lock(&sess->lock);
sess->max_io_size = attrs.max_io_size;
@@ -805,7 +808,7 @@ static struct rnbd_clt_session *alloc_sess(const char *sessname)
mutex_init(&sess->lock);
INIT_LIST_HEAD(&sess->devs_list);
INIT_LIST_HEAD(&sess->list);
bitmap_zero(sess->cpu_queues_bm, NR_CPUS);
bitmap_zero(sess->cpu_queues_bm, num_possible_cpus());
init_waitqueue_head(&sess->rtrs_waitq);
refcount_set(&sess->refcount, 1);
@@ -1047,7 +1050,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
};
err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
&vec, 1, size, iu->sgt.sgl, sg_cnt);
if (unlikely(err)) {
if (err) {
rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
err);
return err;
@@ -1078,7 +1081,7 @@ static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev,
cpu_q = get_cpu_ptr(sess->cpu_queues);
spin_lock_irqsave(&cpu_q->requeue_lock, flags);
if (likely(!test_and_set_bit_lock(0, &q->in_list))) {
if (!test_and_set_bit_lock(0, &q->in_list)) {
if (WARN_ON(!list_empty(&q->requeue_list)))
goto unlock;
@@ -1090,7 +1093,7 @@ static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev,
*/
smp_mb__before_atomic();
}
if (likely(atomic_read(&sess->busy))) {
if (atomic_read(&sess->busy)) {
list_add_tail(&q->requeue_list, &cpu_q->requeue_list);
} else {
/* Very unlikely, but possible: busy counter was
@@ -1118,7 +1121,7 @@ static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev,
if (delay != RNBD_DELAY_IFBUSY)
blk_mq_delay_run_hw_queue(hctx, delay);
else if (unlikely(!rnbd_clt_dev_add_to_requeue(dev, q)))
else if (!rnbd_clt_dev_add_to_requeue(dev, q))
/*
* If session is not busy we have to restart
* the queue ourselves.
@@ -1135,12 +1138,12 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
int err;
blk_status_t ret = BLK_STS_IOERR;
if (unlikely(dev->dev_state != DEV_STATE_MAPPED))
if (dev->dev_state != DEV_STATE_MAPPED)
return BLK_STS_IOERR;
iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
RTRS_PERMIT_NOWAIT);
if (unlikely(!iu->permit)) {
if (!iu->permit) {
rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);
return BLK_STS_RESOURCE;
}
@@ -1148,7 +1151,8 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
iu->sgt.sgl = iu->first_sgl;
err = sg_alloc_table_chained(&iu->sgt,
/* Even-if the request has no segment,
* sglist must have one entry at least */
* sglist must have one entry at least.
*/
blk_rq_nr_phys_segments(rq) ? : 1,
iu->sgt.sgl,
RNBD_INLINE_SG_CNT);
@@ -1161,9 +1165,9 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
err = rnbd_client_xfer_request(dev, rq, iu);
if (likely(err == 0))
if (err == 0)
return BLK_STS_OK;
if (unlikely(err == -EAGAIN || err == -ENOMEM)) {
if (err == -EAGAIN || err == -ENOMEM) {
rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
ret = BLK_STS_RESOURCE;
}
@@ -1294,7 +1298,11 @@ find_and_get_or_create_sess(const char *sessname,
err = PTR_ERR(sess->rtrs);
goto wake_up_and_put;
}
rtrs_clt_query(sess->rtrs, &attrs);
err = rtrs_clt_query(sess->rtrs, &attrs);
if (err)
goto close_rtrs;
sess->max_io_size = attrs.max_io_size;
sess->queue_depth = attrs.queue_depth;
sess->nr_poll_queues = nr_poll_queues;
@@ -1576,7 +1584,7 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
struct rnbd_clt_dev *dev;
int ret;
if (unlikely(exists_devpath(pathname, sessname)))
if (exists_devpath(pathname, sessname))
return ERR_PTR(-EEXIST);
sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr, nr_poll_queues);
@@ -87,7 +87,7 @@ struct rnbd_clt_session {
DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
int __percpu *cpu_rr; /* per-cpu var for CPU round-robin */
atomic_t busy;
int queue_depth;
size_t queue_depth;
u32 max_io_size;
struct blk_mq_tag_set tag_set;
u32 nr_poll_queues;
@@ -104,7 +104,7 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
rcu_read_lock();
sess_dev = xa_load(&srv_sess->index_idr, dev_id);
if (likely(sess_dev))
if (sess_dev)
ret = kref_get_unless_zero(&sess_dev->kref);
rcu_read_unlock();
@@ -2976,7 +2976,8 @@ EXPORT_SYMBOL(rtrs_clt_request);
int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index)
{
int cnt;
/* If no path, return -1 for block layer not to try again */
int cnt = -1;
struct rtrs_con *con;
struct rtrs_clt_sess *sess;
struct path_it it;
@@ -576,6 +576,11 @@ static void nvme_free_ns(struct kref *kref)
kfree(ns);
}
static inline bool nvme_get_ns(struct nvme_ns *ns)
{
return kref_get_unless_zero(&ns->kref);
}
void nvme_put_ns(struct nvme_ns *ns)
{
kref_put(&ns->kref, nvme_free_ns);
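The new nvme_get_ns() helper only wraps kref_get_unless_zero(), which takes a reference solely while the count is still non-zero, so a lookup can never resurrect a namespace that is already being freed. A minimal userspace model of the pattern, with C11 atomics standing in for struct kref (illustrative only):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { atomic_int refs; };

/* Equivalent of kref_get_unless_zero(): take a reference only while
 * the count is still non-zero. */
static bool get_unless_zero(struct obj *o)
{
	int v = atomic_load(&o->refs);

	while (v != 0) {
		if (atomic_compare_exchange_weak(&o->refs, &v, v + 1))
			return true;	/* reference taken */
	}
	return false;			/* object already on its way out */
}

static void put_ref(struct obj *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		free(o);		/* last reference: release */
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refs, 1);
	printf("got ref: %d\n", get_unless_zero(o));	/* 1 */
	put_ref(o);
	put_ref(o);	/* drops to zero and frees */
	return 0;
}
```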
@@ -584,9 +589,6 @@ EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
static inline void nvme_clear_nvme_request(struct request *req)
{
struct nvme_command *cmd = nvme_req(req)->cmd;
memset(cmd, 0, sizeof(*cmd));
nvme_req(req)->retries = 0;
nvme_req(req)->flags = 0;
req->rq_flags |= RQF_DONTPREP;
@@ -637,6 +639,66 @@ static struct request *nvme_alloc_request_qid(struct request_queue *q,
return req;
}
/*
* For something we're not in a state to send to the device the default action
* is to busy it and retry it after the controller state is recovered. However,
* if the controller is deleting or if anything is marked for failfast or
* nvme multipath it is immediately failed.
*
* Note: commands used to initialize the controller will be marked for failfast.
* Note: nvme cli/ioctl commands are marked for failfast.
*/
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq)
{
if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
ctrl->state != NVME_CTRL_DEAD &&
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
struct nvme_request *req = nvme_req(rq);
/*
* currently we have a problem sending passthru commands
* on the admin_q if the controller is not LIVE because we can't
* make sure that they are going out after the admin connect,
* controller enable and/or other commands in the initialization
* sequence. until the controller will be LIVE, fail with
* BLK_STS_RESOURCE so that they will be rescheduled.
*/
if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
return false;
if (ctrl->ops->flags & NVME_F_FABRICS) {
/*
* Only allow commands on a live queue, except for the connect
* command, which is require to set the queue live in the
* appropinquate states.
*/
switch (ctrl->state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
return true;
break;
default:
break;
case NVME_CTRL_DEAD:
return false;
}
}
return queue_live;
}
EXPORT_SYMBOL_GPL(__nvme_check_ready);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
struct nvme_command c;
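The comment block above nvme_fail_nonready_command() encodes a small policy table: a command sent while the controller is not ready is requeued by default, and failed immediately only for deleting/dead controllers or failfast/multipath requests. A userspace model of just that decision (simplified; the enum values and names are illustrative, not the kernel API):

```c
#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { LIVE, CONNECTING, DELETING_NOIO, DEAD };
enum disposition { REQUEUE, FAIL_FAST };

static enum disposition fail_nonready(enum ctrl_state state,
				      bool failfast_expired,
				      bool noretry, bool mpath)
{
	if (state != DELETING_NOIO && state != DEAD &&
	    !failfast_expired && !noretry && !mpath)
		return REQUEUE;		/* BLK_STS_RESOURCE: retry after recovery */
	return FAIL_FAST;		/* nvme_host_path_error(): fail immediately */
}

int main(void)
{
	printf("connecting: %d\n",
	       fail_nonready(CONNECTING, false, false, false)); /* 0 = REQUEUE */
	printf("dead:       %d\n",
	       fail_nonready(DEAD, false, false, false));       /* 1 = FAIL_FAST */
	return 0;
}
```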
@@ -898,8 +960,10 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
struct nvme_command *cmd = nvme_req(req)->cmd;
blk_status_t ret = BLK_STS_OK;
if (!(req->rq_flags & RQF_DONTPREP))
if (!(req->rq_flags & RQF_DONTPREP)) {
nvme_clear_nvme_request(req);
memset(cmd, 0, sizeof(*cmd));
}
switch (req_op(req)) {
case REQ_OP_DRV_IN:
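This hunk moves the memset out of nvme_clear_nvme_request() and into the !RQF_DONTPREP branch, so it runs when a filesystem request is first prepared; a passthrough request, whose command buffer is populated wholesale before dispatch, skips the clear. A hedged userspace sketch of the idea (illustrative control flow, not the driver's actual code paths):

```c
#include <string.h>
#include <stdio.h>

struct cmd { unsigned char b[64]; };

/* Zero the command only when it will be built field by field; a
 * passthrough command is copied over in full, so clearing it first
 * is wasted work. */
static void setup_cmd(struct cmd *c, int passthrough, const struct cmd *user)
{
	if (passthrough) {
		memcpy(c, user, sizeof(*c));	/* fully overwritten: no memset */
	} else {
		memset(c, 0, sizeof(*c));	/* built incrementally: clear first */
		c->b[0] = 0x02;			/* e.g. an opcode */
	}
}

int main(void)
{
	struct cmd c, user = { .b = { 0x7f } };

	setup_cmd(&c, 1, &user);
	printf("opcode byte: %#x\n", c.b[0]);	/* 0x7f, untouched by any memset */
	return 0;
}
```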
@@ -1494,7 +1558,7 @@ static int nvme_ns_open(struct nvme_ns *ns)
/* should never be called due to GENHD_FL_HIDDEN */
if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head)))
goto fail;
if (!kref_get_unless_zero(&ns->kref))
if (!nvme_get_ns(ns))
goto fail;
if (!try_module_get(ns->ctrl->ops->module))
goto fail_put_ns;
@@ -1999,28 +2063,6 @@ static const struct block_device_operations nvme_bdev_ops = {
.pr_ops = &nvme_pr_ops,
};
#ifdef CONFIG_NVME_MULTIPATH
struct nvme_ctrl *nvme_find_get_live_ctrl(struct nvme_subsystem *subsys)
{
struct nvme_ctrl *ctrl;
int ret;
ret = mutex_lock_killable(&nvme_subsystems_lock);
if (ret)
return ERR_PTR(ret);
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
if (ctrl->state == NVME_CTRL_LIVE)
goto found;
}
mutex_unlock(&nvme_subsystems_lock);
return ERR_PTR(-EWOULDBLOCK);
found:
nvme_get_ctrl(ctrl);
mutex_unlock(&nvme_subsystems_lock);
return ctrl;
}
#endif /* CONFIG_NVME_MULTIPATH */
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
unsigned long timeout =
@@ -3604,7 +3646,7 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
if (ns->head->ns_id == nsid) {
if (!kref_get_unless_zero(&ns->kref))
if (!nvme_get_ns(ns))
continue;
ret = ns;
break;
@@ -533,63 +533,6 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
return NULL;
}
/*
* For something we're not in a state to send to the device the default action
* is to busy it and retry it after the controller state is recovered. However,
* if the controller is deleting or if anything is marked for failfast or
* nvme multipath it is immediately failed.
*
* Note: commands used to initialize the controller will be marked for failfast.
* Note: nvme cli/ioctl commands are marked for failfast.
*/
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq)
{
if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
ctrl->state != NVME_CTRL_DEAD &&
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
struct nvme_request *req = nvme_req(rq);
/*
* currently we have a problem sending passthru commands
* on the admin_q if the controller is not LIVE because we can't
* make sure that they are going out after the admin connect,
* controller enable and/or other commands in the initialization
* sequence. until the controller will be LIVE, fail with
* BLK_STS_RESOURCE so that they will be rescheduled.
*/
if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
return false;
/*
* Only allow commands on a live queue, except for the connect command,
* which is require to set the queue live in the appropinquate states.
*/
switch (ctrl->state) {
case NVME_CTRL_CONNECTING:
if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
return true;
break;
default:
break;
case NVME_CTRL_DEAD:
return false;
}
return queue_live;
}
EXPORT_SYMBOL_GPL(__nvmf_check_ready);
static const match_table_t opt_tokens = {
{ NVMF_OPT_TRANSPORT, "transport=%s" },
{ NVMF_OPT_TRADDR, "traddr=%s" },
@@ -184,20 +184,7 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq);
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live);
bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts);
static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
if (likely(ctrl->state == NVME_CTRL_LIVE ||
ctrl->state == NVME_CTRL_DELETING))
return true;
return __nvmf_check_ready(ctrl, rq, queue_live);
}
#endif /* _NVME_FABRICS_H */
@@ -2766,8 +2766,8 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_status_t ret;
if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
ret = nvme_setup_cmd(ns, rq);
if (ret)
@@ -370,41 +370,45 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns_head *head,
unsigned int cmd, void __user *argp)
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp, struct nvme_ns_head *head, int srcu_idx)
{
struct nvme_ctrl *ctrl = nvme_find_get_live_ctrl(head->subsys);
struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
ret = nvme_ctrl_ioctl(ctrl, cmd, argp);
nvme_put_ctrl(ctrl);
return ret;
}
nvme_get_ctrl(ns->ctrl);
nvme_put_ns_from_disk(head, srcu_idx);
ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);
static int nvme_ns_head_ns_ioctl(struct nvme_ns_head *head,
unsigned int cmd, void __user *argp)
{
int srcu_idx = srcu_read_lock(&head->srcu);
struct nvme_ns *ns = nvme_find_path(head);
int ret = -EWOULDBLOCK;
if (ns)
ret = nvme_ns_ioctl(ns, cmd, argp);
srcu_read_unlock(&head->srcu, srcu_idx);
nvme_put_ctrl(ctrl);
return ret;
}
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct nvme_ns_head *head = bdev->bd_disk->private_data;
struct nvme_ns_head *head = NULL;
void __user *argp = (void __user *)arg;
struct nvme_ns *ns;
int srcu_idx, ret;
ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
if (unlikely(!ns))
return -EWOULDBLOCK;
/*
* Handle ioctls that apply to the controller instead of the namespace
* seperately and drop the ns SRCU reference early. This avoids a
* deadlock when deleting namespaces using the passthrough interface.
*/
if (is_ctrl_ioctl(cmd))
return nvme_ns_head_ctrl_ioctl(head, cmd, argp);
return nvme_ns_head_ns_ioctl(head, cmd, argp);
ret = nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
else {
ret = nvme_ns_ioctl(ns, cmd, argp);
nvme_put_ns_from_disk(head, srcu_idx);
}
return ret;
}
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
@@ -414,10 +418,23 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
struct nvme_ns_head *head =
container_of(cdev, struct nvme_ns_head, cdev);
void __user *argp = (void __user *)arg;
struct nvme_ns *ns;
int srcu_idx, ret;
srcu_idx = srcu_read_lock(&head->srcu);
ns = nvme_find_path(head);
if (!ns) {
srcu_read_unlock(&head->srcu, srcu_idx);
return -EWOULDBLOCK;
}
if (is_ctrl_ioctl(cmd))
return nvme_ns_head_ctrl_ioctl(head, cmd, argp);
return nvme_ns_head_ns_ioctl(head, cmd, argp);
return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
ret = nvme_ns_ioctl(ns, cmd, argp);
nvme_put_ns_from_disk(head, srcu_idx);
return ret;
}
#endif /* CONFIG_NVME_MULTIPATH */
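The rework pins the controller with nvme_get_ctrl() and drops the SRCU reference before issuing the ioctl, so a controller ioctl that ends up removing namespaces can no longer deadlock against namespace deletion waiting for SRCU readers. A userspace sketch of that ordering, with a rwlock standing in for SRCU (names are illustrative):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t srcu_like = PTHREAD_RWLOCK_INITIALIZER;
static int ctrl_refs = 1;	/* stand-in for the controller refcount */

static void ctrl_ioctl_path(void)
{
	pthread_rwlock_rdlock(&srcu_like);	/* srcu_read_lock() */
	__sync_fetch_and_add(&ctrl_refs, 1);	/* nvme_get_ctrl() */
	pthread_rwlock_unlock(&srcu_like);	/* drop reader BEFORE slow work */

	puts("controller ioctl runs without holding the reader lock");

	__sync_fetch_and_sub(&ctrl_refs, 1);	/* nvme_put_ctrl() */
}

int main(void)
{
	ctrl_ioctl_path();
	return 0;
}
```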
@@ -70,6 +70,7 @@ void nvme_failover_req(struct request *req)
struct nvme_ns *ns = req->q->queuedata;
u16 status = nvme_req(req)->status & 0x7ff;
unsigned long flags;
struct bio *bio;
nvme_mpath_clear_current_path(ns);
@@ -84,6 +85,8 @@ void nvme_failover_req(struct request *req)
}
spin_lock_irqsave(&ns->head->requeue_lock, flags);
for (bio = req->bio; bio; bio = bio->bi_next)
bio_set_dev(bio, ns->head->disk->part0);
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
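Without this, bios requeued after a failover stay pointed at the failing path's device; the fix walks the request's bio chain and re-points each bio at the shared ns_head disk before stealing them onto the requeue list. A tiny userspace model of the walk (struct fields are illustrative):

```c
#include <stdio.h>

struct bio { struct bio *bi_next; int dev; };

/* Mirror of the added loop: re-point every bio in the chain at the
 * shared head device before the request's bios are requeued. */
static void reset_bios_to_head(struct bio *first, int head_dev)
{
	struct bio *bio;

	for (bio = first; bio; bio = bio->bi_next)
		bio->dev = head_dev;	/* bio_set_dev(bio, head->disk->part0) */
}

int main(void)
{
	struct bio b2 = { NULL, 1 }, b1 = { &b2, 1 };

	reset_bios_to_head(&b1, 9);
	printf("%d %d\n", b1.dev, b2.dev);	/* 9 9 */
	return 0;
}
```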
@@ -638,6 +638,21 @@ struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live);
static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
{
if (likely(ctrl->state == NVME_CTRL_LIVE))
return true;
if (ctrl->ops->flags & NVME_F_FABRICS &&
ctrl->state == NVME_CTRL_DELETING)
return true;
return __nvme_check_ready(ctrl, rq, queue_live);
}
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
@@ -664,7 +679,6 @@ struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
struct nvme_ctrl *nvme_find_get_live_ctrl(struct nvme_subsystem *subsys);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
@@ -933,6 +933,9 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
return BLK_STS_IOERR;
if (!nvme_check_ready(&dev->ctrl, req, true))
return nvme_fail_nonready_command(&dev->ctrl, req);
ret = nvme_setup_cmd(ns, req);
if (ret)
return ret;
@@ -2050,8 +2050,8 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(rq->tag < 0);
if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
dev = queue->device->dev;
@@ -2338,8 +2338,8 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
blk_status_t ret;
if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
ret = nvme_tcp_setup_cmd_pdu(ns, rq);
if (unlikely(ret))
@@ -307,7 +307,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
case NVME_LOG_ANA:
return nvmet_execute_get_log_page_ana(req);
}
pr_err("unhandled lid %d on qid %d\n",
pr_debug("unhandled lid %d on qid %d\n",
req->cmd->get_log_page.lid, req->sq->qid);
req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
@@ -659,7 +659,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
return nvmet_execute_identify_desclist(req);
}
pr_err("unhandled identify cns %d on qid %d\n",
pr_debug("unhandled identify cns %d on qid %d\n",
req->cmd->identify.cns, req->sq->qid);
req->error_loc = offsetof(struct nvme_identify, cns);
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
@@ -977,7 +977,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
return 0;
}
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
req->sq->qid);
req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
@@ -138,8 +138,8 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
blk_status_t ret;
if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);
ret = nvme_setup_cmd(ns, req);
if (ret)
@@ -52,7 +52,7 @@
#define DASD_ECKD_CCW_RCD 0xFA
#define DASD_ECKD_CCW_DSO 0xF7
/* Define Subssystem Function / Orders */
/* Define Subsystem Function / Orders */
#define DSO_ORDER_RAS 0x81
/*
@@ -110,7 +110,7 @@
#define DASD_ECKD_PG_GROUPED 0x10
/*
* Size that is reportet for large volumes in the old 16-bit no_cyl field
* Size that is reported for large volumes in the old 16-bit no_cyl field
*/
#define LV_COMPAT_CYL 0xFFFE
@@ -555,7 +555,7 @@ struct dasd_dso_ras_ext_range {
} __packed;
/*
* Define Subsytem Operation - Release Allocated Space
* Define Subsystem Operation - Release Allocated Space
*/
struct dasd_dso_ras_data {
__u8 order;
@@ -676,7 +676,7 @@ struct dasd_eckd_private {
struct dasd_ext_pool_sum eps;
u32 real_cyl;
/* alias managemnet */
/* alias management */
struct dasd_uid uid;
struct alias_pav_group *pavgroup;
struct alias_lcu *lcu;
@@ -1677,6 +1677,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct inode *bd_inode = bdev_file_inode(file);
loff_t size = i_size_read(bd_inode);
struct blk_plug plug;
size_t shorted = 0;
ssize_t ret;
if (bdev_read_only(I_BDEV(bd_inode)))
@@ -1694,12 +1695,17 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
return -EOPNOTSUPP;
iov_iter_truncate(from, size - iocb->ki_pos);
size -= iocb->ki_pos;
if (iov_iter_count(from) > size) {
shorted = iov_iter_count(from) - size;
iov_iter_truncate(from, size);
}
blk_start_plug(&plug);
ret = __generic_file_write_iter(iocb, from);
if (ret > 0)
ret = generic_write_sync(iocb, ret);
iov_iter_reexpand(from, iov_iter_count(from) + shorted);
blk_finish_plug(&plug);
return ret;
}
@@ -1711,13 +1717,21 @@ ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct inode *bd_inode = bdev_file_inode(file);
loff_t size = i_size_read(bd_inode);
loff_t pos = iocb->ki_pos;
size_t shorted = 0;
ssize_t ret;
if (pos >= size)
return 0;
size -= pos;
iov_iter_truncate(to, size);
return generic_file_read_iter(iocb, to);
if (iov_iter_count(to) > size) {
shorted = iov_iter_count(to) - size;
iov_iter_truncate(to, size);
}
ret = generic_file_read_iter(iocb, to);
iov_iter_reexpand(to, iov_iter_count(to) + shorted);
return ret;
}
EXPORT_SYMBOL_GPL(blkdev_read_iter);
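blkdev_read_iter()/blkdev_write_iter() truncate the iterator to the device size before the transfer; without re-expanding afterwards, the caller would see the hidden tail as consumed and compute a wrong residual count. A userspace model of the truncate/re-expand round trip (struct and names are illustrative):

```c
#include <stddef.h>
#include <stdio.h>

struct iter { size_t count; };

static void truncate_to(struct iter *it, size_t limit, size_t *shorted)
{
	if (it->count > limit) {
		*shorted = it->count - limit;	/* remember the hidden tail */
		it->count = limit;
	}
}

static void reexpand(struct iter *it, size_t shorted)
{
	it->count += shorted;			/* iov_iter_reexpand() equivalent */
}

int main(void)
{
	struct iter it = { .count = 8192 };
	size_t shorted = 0, device_bytes_left = 5000;

	truncate_to(&it, device_bytes_left, &shorted);
	it.count -= 5000;	/* pretend the read consumed 5000 bytes */
	reexpand(&it, shorted);
	printf("residual count = %zu\n", it.count);	/* 3192, not 0 */
	return 0;
}
```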
@@ -106,6 +106,8 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
extern unsigned int bio_max_size(struct bio *bio);
/**
* bio_full - check if the bio is full
* @bio: bio to check
@@ -119,7 +121,7 @@ static inline bool bio_full(struct bio *bio, unsigned len)
if (bio->bi_vcnt >= bio->bi_max_vecs)
return true;
if (bio->bi_iter.bi_size > UINT_MAX - len)
if (bio->bi_iter.bi_size > bio_max_size(bio) - len)
return true;
return false;
@@ -326,6 +326,8 @@ enum blk_bounce {
};
struct queue_limits {
unsigned int bio_max_bytes;
enum blk_bounce bounce;
unsigned long seg_boundary_mask;
unsigned long virt_boundary_mask;