Commit 2f50037a authored by Linus Torvalds

Merge tag 'for-linus-20180504' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A collection of fixes that should to into this release. This contains:

   - Set of bcache fixes from Coly, fixing regression in patches that
     went into this series.

   - Set of NVMe fixes by way of Keith.

   - Set of bdi related fixes, one from Jan and two from Tetsuo Handa,
     fixing various issues around device addition/removal.

   - Two block inflight fixes from Omar, fixing issues around the
     transition to using tags for blk-mq inflight accounting that we
     did a few releases ago"

* tag 'for-linus-20180504' of git://git.kernel.dk/linux-block:
  bdi: Fix oops in wb_workfn()
  nvmet: switch loopback target state to connecting when resetting
  nvme/multipath: Fix multipath disabled naming collisions
  nvme/multipath: Disable runtime writable enabling parameter
  nvme: Set integrity flag for user passthrough commands
  nvme: fix potential memory leak in option parsing
  bdi: Fix use after free bug in debugfs_remove()
  bdi: wake up concurrent wb_shutdown() callers.
  bcache: use pr_info() to inform duplicated CACHE_SET_IO_DISABLE set
  bcache: set dc->io_disable to true in conditional_stop_bcache_device()
  bcache: add wait_for_kthread_stop() in bch_allocator_thread()
  bcache: count backing device I/O error for writeback I/O
  bcache: set CACHE_SET_IO_DISABLE in bch_cached_dev_error()
  bcache: store disk name in struct cache and struct cached_dev
  blk-mq: fix sysfs inflight counter
  blk-mq: count allocated but not started requests in iostats inflight
parents 2e171ffc b8b78495
@@ -95,18 +95,15 @@ static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
 {
 	struct mq_inflight *mi = priv;
 
-	if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
-		/*
-		 * index[0] counts the specific partition that was asked
-		 * for. index[1] counts the ones that are active on the
-		 * whole device, so increment that if mi->part is indeed
-		 * a partition, and not a whole device.
-		 */
-		if (rq->part == mi->part)
-			mi->inflight[0]++;
-		if (mi->part->partno)
-			mi->inflight[1]++;
-	}
+	/*
+	 * index[0] counts the specific partition that was asked for. index[1]
+	 * counts the ones that are active on the whole device, so increment
+	 * that if mi->part is indeed a partition, and not a whole device.
+	 */
+	if (rq->part == mi->part)
+		mi->inflight[0]++;
+	if (mi->part->partno)
+		mi->inflight[1]++;
 }
 
 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
@@ -118,6 +115,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
 }
 
+static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
+				     struct request *rq, void *priv,
+				     bool reserved)
+{
+	struct mq_inflight *mi = priv;
+
+	if (rq->part == mi->part)
+		mi->inflight[rq_data_dir(rq)]++;
+}
+
+void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+			 unsigned int inflight[2])
+{
+	struct mq_inflight mi = { .part = part, .inflight = inflight, };
+
+	inflight[0] = inflight[1] = 0;
+	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
+}
+
 void blk_freeze_queue_start(struct request_queue *q)
 {
 	int freeze_depth;
...
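The index[0]/index[1] accounting above feeds the existing blk_mq_in_flight() summary, while the new blk_mq_in_flight_rw() splits the count by data direction (reads in slot 0, writes in slot 1, per rq_data_dir()). A minimal, hypothetical caller sketch follows; only blk_mq_in_flight_rw() itself comes from this series, the function below is purely illustrative:

static void example_log_inflight_rw(struct request_queue *q,
				    struct hd_struct *part)
{
	unsigned int inflight[2];

	blk_mq_in_flight_rw(q, part, inflight);
	pr_info("%u reads and %u writes in flight",
		inflight[0], inflight[1]);
}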
@@ -189,6 +189,8 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
 		      unsigned int inflight[2]);
+void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+			 unsigned int inflight[2]);
 
 static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
 {
...
@@ -82,6 +82,18 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
 	}
 }
 
+void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+		       unsigned int inflight[2])
+{
+	if (q->mq_ops) {
+		blk_mq_in_flight_rw(q, part, inflight);
+		return;
+	}
+
+	inflight[0] = atomic_read(&part->in_flight[0]);
+	inflight[1] = atomic_read(&part->in_flight[1]);
+}
+
 struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
 {
 	struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
...
@@ -145,13 +145,15 @@ ssize_t part_stat_show(struct device *dev,
 		jiffies_to_msecs(part_stat_read(p, time_in_queue)));
 }
 
-ssize_t part_inflight_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
+ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
 {
 	struct hd_struct *p = dev_to_part(dev);
+	struct request_queue *q = part_to_disk(p)->queue;
+	unsigned int inflight[2];
 
-	return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
-		atomic_read(&p->in_flight[1]));
+	part_in_flight_rw(q, p, inflight);
+	return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
 }
 
 #ifdef CONFIG_FAIL_MAKE_REQUEST
...
@@ -290,7 +290,7 @@ do {									\
 		if (kthread_should_stop() ||				\
 		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
 			set_current_state(TASK_RUNNING);		\
-			return 0;					\
+			goto out;					\
 		}							\
 									\
 		schedule();						\
@@ -378,6 +378,9 @@ static int bch_allocator_thread(void *arg)
 			bch_prio_write(ca);
 		}
 	}
+out:
+	wait_for_kthread_stop();
+	return 0;
 }
 
 /* Allocation */
...
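The goto out change matters because a kernel thread that returns before kthread_stop() is called leaves the stopping side waiting on a task that has already exited; the allocator thread now parks in wait_for_kthread_stop() instead. A generic sketch of that rule using only core kthread primitives (illustrative, not taken from the series):

static int example_kthread_fn(void *arg)
{
	while (!kthread_should_stop()) {
		/* do one unit of work, then idle politely */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	}
	/* only return once kthread_stop() has been called */
	return 0;
}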
@@ -392,6 +392,8 @@ struct cached_dev {
 #define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
 	atomic_t		io_errors;
 	unsigned		error_limit;
+
+	char			backing_dev_name[BDEVNAME_SIZE];
 };
 
 enum alloc_reserve {
@@ -464,6 +466,8 @@ struct cache {
 	atomic_long_t		meta_sectors_written;
 	atomic_long_t		btree_sectors_written;
 	atomic_long_t		sectors_written;
+
+	char			cache_dev_name[BDEVNAME_SIZE];
 };
 
 struct gc_stat {
...
@@ -106,7 +106,6 @@ void bch_btree_verify(struct btree *b)
 
 void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
-	char name[BDEVNAME_SIZE];
 	struct bio *check;
 	struct bio_vec bv, cbv;
 	struct bvec_iter iter, citer = { 0 };
@@ -134,7 +133,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 					bv.bv_len),
 				 dc->disk.c,
 				 "verify failed at dev %s sector %llu",
-				 bdevname(dc->bdev, name),
+				 dc->backing_dev_name,
 				 (uint64_t) bio->bi_iter.bi_sector);
 
 		kunmap_atomic(p1);
...
@@ -52,7 +52,6 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 /* IO errors */
 void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
 {
-	char buf[BDEVNAME_SIZE];
 	unsigned errors;
 
 	WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
@@ -60,7 +59,7 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
 	errors = atomic_add_return(1, &dc->io_errors);
 	if (errors < dc->error_limit)
 		pr_err("%s: IO error on backing device, unrecoverable",
-			bio_devname(bio, buf));
+			dc->backing_dev_name);
 	else
 		bch_cached_dev_error(dc);
 }
@@ -105,19 +104,18 @@ void bch_count_io_errors(struct cache *ca,
 	}
 
 	if (error) {
-		char buf[BDEVNAME_SIZE];
 		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
 						    &ca->io_errors);
 		errors >>= IO_ERROR_SHIFT;
 
 		if (errors < ca->set->error_limit)
 			pr_err("%s: IO error on %s%s",
-			       bdevname(ca->bdev, buf), m,
+			       ca->cache_dev_name, m,
 			       is_read ? ", recovering." : ".");
 		else
 			bch_cache_set_error(ca->set,
 					    "%s: too many IO errors %s",
-					    bdevname(ca->bdev, buf), m);
+					    ca->cache_dev_name, m);
 	}
 }
...
@@ -649,11 +649,8 @@ static void backing_request_endio(struct bio *bio)
 		 */
 		if (unlikely(s->iop.writeback &&
			     bio->bi_opf & REQ_PREFLUSH)) {
-			char buf[BDEVNAME_SIZE];
-
-			bio_devname(bio, buf);
 			pr_err("Can't flush %s: returned bi_status %i",
-				buf, bio->bi_status);
+				dc->backing_dev_name, bio->bi_status);
 		} else {
 			/* set to orig_bio->bi_status in bio_complete() */
 			s->iop.status = bio->bi_status;
...
@@ -936,7 +936,6 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
 static void cached_dev_detach_finish(struct work_struct *w)
 {
 	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
-	char buf[BDEVNAME_SIZE];
 	struct closure cl;
 
 	closure_init_stack(&cl);
@@ -967,7 +966,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
 
 	mutex_unlock(&bch_register_lock);
 
-	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
+	pr_info("Caching disabled for %s", dc->backing_dev_name);
 
 	/* Drop ref we took in cached_dev_detach() */
 	closure_put(&dc->disk.cl);
@@ -999,29 +998,28 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 {
 	uint32_t rtime = cpu_to_le32(get_seconds());
 	struct uuid_entry *u;
-	char buf[BDEVNAME_SIZE];
 	struct cached_dev *exist_dc, *t;
 
-	bdevname(dc->bdev, buf);
-
 	if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
 	    (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
 		return -ENOENT;
 
 	if (dc->disk.c) {
-		pr_err("Can't attach %s: already attached", buf);
+		pr_err("Can't attach %s: already attached",
+		       dc->backing_dev_name);
 		return -EINVAL;
 	}
 
 	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
-		pr_err("Can't attach %s: shutting down", buf);
+		pr_err("Can't attach %s: shutting down",
+		       dc->backing_dev_name);
 		return -EINVAL;
 	}
 
 	if (dc->sb.block_size < c->sb.block_size) {
 		/* Will die */
 		pr_err("Couldn't attach %s: block size less than set's block size",
-		       buf);
+		       dc->backing_dev_name);
 		return -EINVAL;
 	}
@@ -1029,7 +1027,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
 		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
 			pr_err("Tried to attach %s but duplicate UUID already attached",
-				buf);
+				dc->backing_dev_name);
 			return -EINVAL;
 		}
@@ -1047,13 +1045,15 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	if (!u) {
 		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
-			pr_err("Couldn't find uuid for %s in set", buf);
+			pr_err("Couldn't find uuid for %s in set",
+			       dc->backing_dev_name);
 			return -ENOENT;
 		}
 
 		u = uuid_find_empty(c);
 		if (!u) {
-			pr_err("Not caching %s, no room for UUID", buf);
+			pr_err("Not caching %s, no room for UUID",
+			       dc->backing_dev_name);
 			return -EINVAL;
 		}
 	}
@@ -1112,7 +1112,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	up_write(&dc->writeback_lock);
 
 	pr_info("Caching %s as %s on set %pU",
-		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
+		dc->backing_dev_name,
+		dc->disk.disk->disk_name,
 		dc->disk.c->sb.set_uuid);
 	return 0;
 }
@@ -1225,10 +1226,10 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 				 struct block_device *bdev,
 				 struct cached_dev *dc)
 {
-	char name[BDEVNAME_SIZE];
 	const char *err = "cannot allocate memory";
 	struct cache_set *c;
 
+	bdevname(bdev, dc->backing_dev_name);
 	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
 	dc->bdev = bdev;
 	dc->bdev->bd_holder = dc;
@@ -1237,6 +1238,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
 	get_page(sb_page);
+
 	if (cached_dev_init(dc, sb->block_size << 9))
 		goto err;
@@ -1247,7 +1249,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
 		goto err;
 
-	pr_info("registered backing device %s", bdevname(bdev, name));
+	pr_info("registered backing device %s", dc->backing_dev_name);
 
 	list_add(&dc->list, &uncached_devices);
 	list_for_each_entry(c, &bch_cache_sets, list)
@@ -1259,7 +1261,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	return;
 err:
-	pr_notice("error %s: %s", bdevname(bdev, name), err);
+	pr_notice("error %s: %s", dc->backing_dev_name, err);
 	bcache_device_stop(&dc->disk);
 }
@@ -1367,7 +1369,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
 bool bch_cached_dev_error(struct cached_dev *dc)
 {
-	char name[BDEVNAME_SIZE];
+	struct cache_set *c;
 
 	if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
 		return false;
@@ -1377,7 +1379,22 @@ bool bch_cached_dev_error(struct cached_dev *dc)
 	smp_mb();
 
 	pr_err("stop %s: too many IO errors on backing device %s\n",
-		dc->disk.disk->disk_name, bdevname(dc->bdev, name));
+		dc->disk.disk->disk_name, dc->backing_dev_name);
+
+	/*
+	 * If the cached device is still attached to a cache set,
+	 * even dc->io_disable is true and no more I/O requests
+	 * accepted, cache device internal I/O (writeback scan or
+	 * garbage collection) may still prevent bcache device from
+	 * being stopped. So here CACHE_SET_IO_DISABLE should be
+	 * set to c->flags too, to make the internal I/O to cache
+	 * device rejected and stopped immediately.
+	 * If c is NULL, that means the bcache device is not attached
+	 * to any cache set, then no CACHE_SET_IO_DISABLE bit to set.
+	 */
+	c = dc->disk.c;
+	if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
+		pr_info("CACHE_SET_IO_DISABLE already set");
 
 	bcache_device_stop(&dc->disk);
 	return true;
@@ -1395,7 +1412,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
 		return false;
 
 	if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
-		pr_warn("CACHE_SET_IO_DISABLE already set");
+		pr_info("CACHE_SET_IO_DISABLE already set");
 
 	/* XXX: we can be called from atomic context
 	   acquire_console_sem();
@@ -1539,6 +1556,20 @@ static void conditional_stop_bcache_device(struct cache_set *c,
 		 */
 		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
 			d->disk->disk_name);
+		/*
+		 * There might be a small time gap that cache set is
+		 * released but bcache device is not. Inside this time
+		 * gap, regular I/O requests will directly go into
+		 * backing device as no cache set attached to. This
+		 * behavior may also introduce potential inconsistence
+		 * data in writeback mode while cache is dirty.
+		 * Therefore before calling bcache_device_stop() due
+		 * to a broken cache device, dc->io_disable should be
+		 * explicitly set to true.
+		 */
+		dc->io_disable = true;
+		/* make others know io_disable is true earlier */
+		smp_mb();
 		bcache_device_stop(d);
 	} else {
 		/*
@@ -2003,12 +2034,10 @@ static int cache_alloc(struct cache *ca)
 static int register_cache(struct cache_sb *sb, struct page *sb_page,
 				struct block_device *bdev, struct cache *ca)
 {
-	char name[BDEVNAME_SIZE];
 	const char *err = NULL; /* must be set for any error case */
 	int ret = 0;
 
-	bdevname(bdev, name);
+	bdevname(bdev, ca->cache_dev_name);
 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
 	ca->bdev = bdev;
 	ca->bdev->bd_holder = ca;
@@ -2045,14 +2074,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 		goto out;
 	}
 
-	pr_info("registered cache device %s", name);
+	pr_info("registered cache device %s", ca->cache_dev_name);
 
 out:
 	kobject_put(&ca->kobj);
 
 err:
 	if (err)
-		pr_notice("error %s: %s", name, err);
+		pr_notice("error %s: %s", ca->cache_dev_name, err);
 
 	return ret;
 }
...
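The comments in bch_cached_dev_error() and conditional_stop_bcache_device() describe two cooperating flags: dc->io_disable gates regular I/O to the backing device, while CACHE_SET_IO_DISABLE gates the cache set's internal I/O (writeback scan, garbage collection). A rough, illustrative combination of the two checks; this helper is hypothetical and not part of the series:

static inline bool example_bcache_io_permitted(struct cached_dev *dc)
{
	if (dc->io_disable)			/* backing device broken */
		return false;
	if (dc->disk.c &&
	    test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags))
		return false;			/* whole cache set disabled */
	return true;
}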
@@ -244,8 +244,10 @@ static void dirty_endio(struct bio *bio)
 	struct keybuf_key *w = bio->bi_private;
 	struct dirty_io *io = w->private;
 
-	if (bio->bi_status)
+	if (bio->bi_status) {
 		SET_KEY_DIRTY(&w->key, false);
+		bch_count_backing_io_errors(io->dc, bio);
+	}
 
 	closure_put(&io->cl);
 }
...
@@ -764,6 +764,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 				ret = PTR_ERR(meta);
 				goto out_unmap;
 			}
+			req->cmd_flags |= REQ_INTEGRITY;
 		}
 	}
@@ -2997,31 +2998,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	if (nvme_init_ns_head(ns, nsid, id))
 		goto out_free_id;
 	nvme_setup_streams_ns(ctrl, ns);
-
-#ifdef CONFIG_NVME_MULTIPATH
-	/*
-	 * If multipathing is enabled we need to always use the subsystem
-	 * instance number for numbering our devices to avoid conflicts
-	 * between subsystems that have multiple controllers and thus use
-	 * the multipath-aware subsystem node and those that have a single
-	 * controller and use the controller node directly.
-	 */
-	if (ns->head->disk) {
-		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
-				ctrl->cntlid, ns->head->instance);
-		flags = GENHD_FL_HIDDEN;
-	} else {
-		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
-				ns->head->instance);
-	}
-#else
-	/*
-	 * But without the multipath code enabled, multiple controller per
-	 * subsystems are visible as devices and thus we cannot use the
-	 * subsystem instance.
-	 */
-	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
-#endif
+	nvme_set_disk_name(disk_name, ns, ctrl, &flags);
 
 	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
 		if (nvme_nvm_register(ns, disk_name, node)) {
...
@@ -668,6 +668,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -ENOMEM;
 				goto out;
 			}
+			kfree(opts->transport);
 			opts->transport = p;
 			break;
 		case NVMF_OPT_NQN:
@@ -676,6 +677,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -ENOMEM;
 				goto out;
 			}
+			kfree(opts->subsysnqn);
 			opts->subsysnqn = p;
 			nqnlen = strlen(opts->subsysnqn);
 			if (nqnlen >= NVMF_NQN_SIZE) {
@@ -698,6 +700,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -ENOMEM;
 				goto out;
 			}
+			kfree(opts->traddr);
 			opts->traddr = p;
 			break;
 		case NVMF_OPT_TRSVCID:
@@ -706,6 +709,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -ENOMEM;
 				goto out;
 			}
+			kfree(opts->trsvcid);
 			opts->trsvcid = p;
 			break;
 		case NVMF_OPT_QUEUE_SIZE:
@@ -792,6 +796,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -EINVAL;
 				goto out;
 			}
+			nvmf_host_put(opts->host);
 			opts->host = nvmf_host_add(p);
 			kfree(p);
 			if (!opts->host) {
@@ -817,6 +822,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -ENOMEM;
 				goto out;
 			}
+			kfree(opts->host_traddr);
 			opts->host_traddr = p;
 			break;
 		case NVMF_OPT_HOST_ID:
...
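Every hunk above fixes the same leak pattern: when an option appears more than once on the connect string, the string from the first match_strdup() was silently overwritten. A hedged sketch of the pattern being applied (the helper name is made up for illustration):

static int example_store_string_opt(char **slot, substring_t *args)
{
	char *p = match_strdup(args);

	if (!p)
		return -ENOMEM;
	kfree(*slot);	/* drop the value from any earlier occurrence */
	*slot = p;
	return 0;
}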
@@ -15,10 +15,32 @@
 #include "nvme.h"
 
 static bool multipath = true;
-module_param(multipath, bool, 0644);
+module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
 	"turn on native support for multiple controllers per subsystem");
 
+/*
+ * If multipathing is enabled we need to always use the subsystem instance
+ * number for numbering our devices to avoid conflicts between subsystems that
+ * have multiple controllers and thus use the multipath-aware subsystem node
+ * and those that have a single controller and use the controller node
+ * directly.
+ */
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+			struct nvme_ctrl *ctrl, int *flags)
+{
+	if (!multipath) {
+		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+	} else if (ns->head->disk) {
+		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+				ctrl->cntlid, ns->head->instance);
+		*flags = GENHD_FL_HIDDEN;
+	} else {
+		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
+				ns->head->instance);
+	}
+}
+
 void nvme_failover_req(struct request *req)
 {
 	struct nvme_ns *ns = req->q->queuedata;
...
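As a worked example of the naming scheme nvme_set_disk_name() centralizes, assume subsystem instance 0, controller cntlid 1 and namespace head instance 1 (values chosen purely for illustration); the three branches then produce the names shown in this standalone, userspace demonstration:

#include <stdio.h>

int main(void)
{
	char disk_name[32];

	/* multipath disabled: the controller instance is used directly */
	sprintf(disk_name, "nvme%dn%d", 0, 1);
	printf("%s\n", disk_name);		/* nvme0n1 */

	/* multipath enabled, head already has a shared disk: hidden per-controller node */
	sprintf(disk_name, "nvme%dc%dn%d", 0, 1, 1);
	printf("%s\n", disk_name);		/* nvme0c1n1 */

	/* multipath enabled, no shared disk yet: subsystem-wide node */
	sprintf(disk_name, "nvme%dn%d", 0, 1);
	printf("%s\n", disk_name);		/* nvme0n1 */

	return 0;
}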
@@ -436,6 +436,8 @@ extern const struct attribute_group nvme_ns_id_attr_group;
 extern const struct block_device_operations nvme_ns_head_ops;
 
 #ifdef CONFIG_NVME_MULTIPATH
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+			struct nvme_ctrl *ctrl, int *flags);
 void nvme_failover_req(struct request *req);
 bool nvme_req_needs_failover(struct request *req, blk_status_t error);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
@@ -461,6 +463,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 }
 
 #else
+/*
+ * Without the multipath code enabled, multiple controller per subsystems are
+ * visible as devices and thus we cannot use the subsystem instance.
+ */
+static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+				      struct nvme_ctrl *ctrl, int *flags)
+{
+	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+}
+
 static inline void nvme_failover_req(struct request *req)
 {
 }
...
@@ -469,6 +469,12 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	nvme_stop_ctrl(&ctrl->ctrl);
 	nvme_loop_shutdown_ctrl(ctrl);
 
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+		/* state change failure should never happen */
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	ret = nvme_loop_configure_admin_queue(ctrl);
 	if (ret)
 		goto out_disable;
...
@@ -1961,7 +1961,7 @@ void wb_workfn(struct work_struct *work)
 	}
 
 	if (!list_empty(&wb->work_list))
-		mod_delayed_work(bdi_wq, &wb->dwork, 0);
+		wb_wakeup(wb);
 	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
 		wb_wakeup_delayed(wb);
...
@@ -369,6 +369,8 @@ static inline void free_part_stats(struct hd_struct *part)
 
 void part_in_flight(struct request_queue *q, struct hd_struct *part,
 		    unsigned int inflight[2]);
+void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+		       unsigned int inflight[2]);
 void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
 			int rw);
 void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
...
@@ -305,4 +305,21 @@ do {									\
 	__ret;								\
 })
 
+/**
+ * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
+ *
+ * @bit: the bit of the word being waited on
+ * @word: the word being waited on, a kernel virtual address
+ *
+ * You can use this helper if bitflags are manipulated atomically rather than
+ * non-atomically under a lock.
+ */
+static inline void clear_and_wake_up_bit(int bit, void *word)
+{
+	clear_bit_unlock(bit, word);
+	/* See wake_up_bit() for which memory barrier you need to use. */
+	smp_mb__after_atomic();
+	wake_up_bit(word, bit);
+}
+
 #endif /* _LINUX_WAIT_BIT_H */
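clear_and_wake_up_bit() pairs with a wait_on_bit() sleeper, which is how the bdi change below uses it for WB_shutting_down so concurrent wb_shutdown() callers actually get woken. A minimal illustrative pairing; both function names here are hypothetical:

/* waiter side: sleeps until the bit is cleared */
static void example_wait_for_shutdown(unsigned long *state, int bit)
{
	wait_on_bit(state, bit, TASK_UNINTERRUPTIBLE);
}

/* owner side: drops the bit and wakes any waiters */
static void example_finish_shutdown(unsigned long *state, int bit)
{
	clear_and_wake_up_bit(bit, state);
}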
@@ -115,6 +115,7 @@ static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 					       bdi, &bdi_debug_stats_fops);
 	if (!bdi->debug_stats) {
 		debugfs_remove(bdi->debug_dir);
+		bdi->debug_dir = NULL;
 		return -ENOMEM;
 	}
 
@@ -383,7 +384,7 @@ static void wb_shutdown(struct bdi_writeback *wb)
 	 * the barrier provided by test_and_clear_bit() above.
 	 */
 	smp_wmb();
-	clear_bit(WB_shutting_down, &wb->state);
+	clear_and_wake_up_bit(WB_shutting_down, &wb->state);
 }
 
 static void wb_exit(struct bdi_writeback *wb)
...