Commit 44579f35 authored by Linus Torvalds

Merge tag 'block-5.5-20191221' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Let's try this one again, this time without the compat_ioctl changes.
  We've got those fixed up, but that can go out next week.

  This contains:

   - block queue flush lockdep annotation (Bart)

   - Type fix for bsg_queue_rq() (Bart)

   - Three dasd fixes (Stefan, Jan)

   - nbd deadlock fix (Mike)

   - Error handling bio user map fix (Yang)

   - iocost fix (Tejun)

   - sbitmap waitqueue addition fix that affects the kyber IO scheduler
     (David)"

* tag 'block-5.5-20191221' of git://git.kernel.dk/linux-block:
  sbitmap: only queue kyber's wait callback if not already active
  block: fix memleak when __blk_rq_map_user_iov() is failed
  s390/dasd: fix typo in copyright statement
  s390/dasd: fix memleak in path handling error case
  s390/dasd/cio: Interpret ccw_device_get_mdc return value correctly
  block: Fix a lockdep complaint triggered by request queue flushing
  block: Fix the type of 'sts' in bsg_queue_rq()
  block: end bio with BLK_STS_AGAIN in case of non-mq devs and REQ_NOWAIT
  nbd: fix shutdown and recv work deadlock v2
  iocost: over-budget forced IOs should schedule async delay
parents a313c8e0 df034c93
@@ -885,11 +885,14 @@ generic_make_request_checks(struct bio *bio)
 	}
 
 	/*
-	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
-	 * if queue is not a request based queue.
+	 * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
+	 * with BLK_STS_AGAIN status in order to catch -EAGAIN and
+	 * to give a chance to the caller to repeat request gracefully.
 	 */
-	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
-		goto not_supported;
+	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
+		status = BLK_STS_AGAIN;
+		goto end_io;
+	}
 
 	if (should_fail_bio(bio))
 		goto end_io;
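For context: with this change a REQ_NOWAIT bio sent to a non-mq queue fails with BLK_STS_AGAIN (seen as -EAGAIN in userspace) instead of -EOPNOTSUPP, so callers can fall back to a blocking retry. A minimal userspace sketch of that retry pattern, using a hypothetical file path; preadv2() with RWF_NOWAIT is the standard way to issue such a request:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/tmp/testfile", O_RDONLY);	/* hypothetical path */
	ssize_t n;

	if (fd < 0)
		return 1;

	/* First attempt must not block (RWF_NOWAIT). */
	n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (n < 0 && errno == EAGAIN) {
		/* The request could not be served without waiting;
		 * repeat it gracefully as a normal blocking read. */
		n = preadv2(fd, &iov, 1, 0, 0);
	}
	printf("read %zd bytes\n", n);
	close(fd);
	return n < 0;
}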
@@ -69,6 +69,7 @@
 #include <linux/blkdev.h>
 #include <linux/gfp.h>
 #include <linux/blk-mq.h>
+#include <linux/lockdep.h>
 
 #include "blk.h"
 #include "blk-mq.h"
@@ -505,6 +506,9 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 	INIT_LIST_HEAD(&fq->flush_queue[1]);
 	INIT_LIST_HEAD(&fq->flush_data_in_flight);
 
+	lockdep_register_key(&fq->key);
+	lockdep_set_class(&fq->mq_flush_lock, &fq->key);
+
 	return fq;
 
  fail_rq:
@@ -519,6 +523,7 @@ void blk_free_flush_queue(struct blk_flush_queue *fq)
 	if (!fq)
 		return;
 
+	lockdep_unregister_key(&fq->key);
 	kfree(fq->flush_rq);
 	kfree(fq);
 }
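For context: this gives each flush queue's mq_flush_lock its own lockdep class, so flushing on one request queue is no longer conflated with flushing on another, which is what triggered the false-positive lockdep complaint. The same dynamic-key pattern in isolation, as a minimal sketch with a made-up struct foo rather than code from this series:

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	struct lock_class_key key;	/* one lock class per instance */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	spin_lock_init(&f->lock);
	lockdep_register_key(&f->key);		/* make the key known to lockdep */
	lockdep_set_class(&f->lock, &f->key);	/* reclassify this instance's lock */
	return f;
}

static void foo_free(struct foo *f)
{
	lockdep_unregister_key(&f->key);	/* must happen before the memory is freed */
	kfree(f);
}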
@@ -1212,7 +1212,7 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 {
 	struct ioc *ioc = iocg->ioc;
 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
@@ -1229,11 +1229,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 	/* clear or maintain depending on the overage */
 	if (time_before_eq64(vtime, now->vnow)) {
 		blkcg_clear_delay(blkg);
-		return;
+		return false;
 	}
 	if (!atomic_read(&blkg->use_delay) &&
 	    time_before_eq64(vtime, now->vnow + vmargin))
-		return;
+		return false;
 
 	/* use delay */
 	if (cost) {
@@ -1250,10 +1250,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
 	if (hrtimer_is_queued(&iocg->delay_timer) &&
 	    abs(oexpires - expires) <= margin_ns / 4)
-		return;
+		return true;
 
 	hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
 			       margin_ns / 4, HRTIMER_MODE_ABS);
+	return true;
 }
 
 static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
@@ -1739,7 +1740,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 	 */
 	if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
 		atomic64_add(abs_cost, &iocg->abs_vdebt);
-		iocg_kick_delay(iocg, &now, cost);
+		if (iocg_kick_delay(iocg, &now, cost))
+			blkcg_schedule_throttle(rqos->q,
+					(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 		return;
 	}
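For context: iocg_kick_delay() now reports whether a delay is actually in effect. When a bio must be issued regardless of budget (issued as the root blkcg, or with a fatal signal pending), its cost is recorded as debt; if the group is over budget, the issuer is additionally handed to blkcg_schedule_throttle(), which applies the delay asynchronously on return to userspace instead of letting forced IOs bypass throttling entirely.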
@@ -151,7 +151,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	return 0;
 
 unmap_rq:
-	__blk_rq_unmap_user(bio);
+	blk_rq_unmap_user(bio);
 fail:
 	rq->bio = NULL;
 	return ret;
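For context: on a partial mapping failure the error path previously used the internal helper __blk_rq_unmap_user(), which unmaps the user pages but never drops the bio references. blk_rq_unmap_user() walks the bio chain and puts each bio as well, so switching to it frees the bios that had already been added to the request, plugging the leak.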
@@ -30,6 +30,7 @@ struct blk_flush_queue {
 	 * at the same time
 	 */
 	struct request *orig_rq;
+	struct lock_class_key key;
 	spinlock_t mq_flush_lock;
 };
@@ -266,7 +266,7 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct request *req = bd->rq;
 	struct bsg_set *bset =
 		container_of(q->tag_set, struct bsg_set, tag_set);
-	int sts = BLK_STS_IOERR;
+	blk_status_t sts = BLK_STS_IOERR;
 	int ret;
 
 	blk_mq_start_request(req);
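For context: blk_status_t is a sparse __bitwise type, so storing BLK_STS_IOERR in a plain int and then returning it from a function declared to return blk_status_t draws sparse warnings about restricted types. Declaring sts with its proper type is a pure annotation fix with no behavior change.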
@@ -1296,10 +1296,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
 	mutex_unlock(&nbd->config_lock);
 	ret = wait_event_interruptible(config->recv_wq,
 				       atomic_read(&config->recv_threads) == 0);
-	if (ret) {
+	if (ret)
 		sock_shutdown(nbd);
-		flush_workqueue(nbd->recv_workq);
-	}
+	flush_workqueue(nbd->recv_workq);
+
 	mutex_lock(&nbd->config_lock);
 	nbd_bdev_reset(bdev);
 	/* user requested, ignore socket errors */
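For context: the receive workqueue is now flushed unconditionally, not only when the wait was interrupted, before config_lock is re-taken. That way no receive worker can still be running (and racing with the device reset done under the lock) when the ioctl path proceeds.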
@@ -1128,7 +1128,8 @@ static u32 get_fcx_max_data(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private = device->private;
 	int fcx_in_css, fcx_in_gneq, fcx_in_features;
-	int tpm, mdc;
+	unsigned int mdc;
+	int tpm;
 
 	if (dasd_nofcx)
 		return 0;
@@ -1142,7 +1143,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
 		return 0;
 
 	mdc = ccw_device_get_mdc(device->cdev, 0);
-	if (mdc < 0) {
+	if (mdc == 0) {
 		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
 		return 0;
 	} else {
@@ -1153,12 +1154,12 @@ static u32 get_fcx_max_data(struct dasd_device *device)
 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
 {
 	struct dasd_eckd_private *private = device->private;
-	int mdc;
+	unsigned int mdc;
 	u32 fcx_max_data;
 
 	if (private->fcx_max_data) {
 		mdc = ccw_device_get_mdc(device->cdev, lpm);
-		if ((mdc < 0)) {
+		if (mdc == 0) {
 			dev_warn(&device->cdev->dev,
 				 "Detecting the maximum data size for zHPF "
 				 "requests failed (rc=%d) for a new path %x\n",
@@ -2073,7 +2074,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	dasd_free_block(device->block);
 	device->block = NULL;
 out_err1:
-	kfree(private->conf_data);
+	dasd_eckd_clear_conf_data(device);
 	kfree(device->private);
 	device->private = NULL;
 	return rc;
@@ -2082,7 +2083,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 static void dasd_eckd_uncheck_device(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private = device->private;
-	int i;
 
 	if (!private)
 		return;
@@ -2092,21 +2092,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
 	private->sneq = NULL;
 	private->vdsneq = NULL;
 	private->gneq = NULL;
-	private->conf_len = 0;
-	for (i = 0; i < 8; i++) {
-		kfree(device->path[i].conf_data);
-		if ((__u8 *)device->path[i].conf_data ==
-		    private->conf_data) {
-			private->conf_data = NULL;
-			private->conf_len = 0;
-		}
-		device->path[i].conf_data = NULL;
-		device->path[i].cssid = 0;
-		device->path[i].ssid = 0;
-		device->path[i].chpid = 0;
-	}
-	kfree(private->conf_data);
-	private->conf_data = NULL;
+	dasd_eckd_clear_conf_data(device);
 }
 
 static struct dasd_ccw_req *
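For context: the open-coded per-path cleanup removed above is consolidated into dasd_eckd_clear_conf_data(), which the dasd_eckd_check_characteristics() error path now also calls, so configuration data is freed per path rather than only through private->conf_data (the leak being fixed). A plausible shape for the helper, reconstructed from the removed lines as a sketch rather than the exact code added elsewhere in this patch, assuming (as the removed loop suggests) that private->conf_data always aliases one of the per-path copies:

static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	/* private->conf_data aliases one of the per-path copies,
	 * so clearing the pointer and freeing each path entry
	 * releases everything exactly once. */
	private->conf_data = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
	}
}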
@@ -2,7 +2,7 @@
 /*
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2000
+ * Copyright IBM Corp. 1999, 2000
  *
  */
@@ -5,7 +5,7 @@
  *		    Carsten Otte <Cotte@de.ibm.com>
  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Coypright IBM Corp. 1999, 2002
+ * Copyright IBM Corp. 1999, 2002
  *
  * /proc interface for the dasd driver.
 *
@@ -635,7 +635,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
  * @mask: mask of paths to use
  *
  * Return the number of 64K-bytes blocks all paths at least support
- * for a transport command. Return values <= 0 indicate failures.
+ * for a transport command. Return value 0 indicates failure.
  */
 int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
 {
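For context: narrowing the documented failure indication from "values <= 0" to exactly 0 is what lets the dasd callers above keep the result in an unsigned int and test mdc == 0. Since the function returns a count of supported 64K-byte blocks, 0 is unambiguous as the failure value.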
@@ -650,8 +650,8 @@ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
 	if (!sbq_wait->sbq) {
 		sbq_wait->sbq = sbq;
 		atomic_inc(&sbq->ws_active);
+		add_wait_queue(&ws->wait, &sbq_wait->wait);
 	}
-	add_wait_queue(&ws->wait, &sbq_wait->wait);
 }
 EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
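For context: an sbq_wait entry can be passed to sbitmap_add_wait_queue() repeatedly (kyber reuses its wait entries), and calling add_wait_queue() on an entry that is already on a waitqueue corrupts the list. Queuing the wait only when sbq_wait->sbq was previously unset ensures each entry is added exactly once until it is removed again.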