Commit b85dfd30 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "Remember about a week ago when I sent the last pull request for 4.1?
  Well, I lied.  Now, I don't want to shift the blame, but Dan, Ming,
  and Richard made a liar out of me.

  Here are three small patches that should go into 4.1.  More
  specifically, this pull request contains:

   - A Kconfig dependency for the pmem block driver, so it can't be
     selected if HAS_IOMEM isn't available.  From Richard Weinberger.

   - A fix for genhd, making the ext_devt_lock softirq safe.  This makes
     lockdep happier, since we also end up grabbing this lock on release
     off the softirq path.  From Dan Williams.

   - A blk-mq software queue release fix from Ming Lei.

  The last two are headed to stable; the first fixes an issue introduced
  in this cycle"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: pmem: Add dependency on HAS_IOMEM
  block: fix ext_dev_lock lockdep report
  blk-mq: free hctx->ctxs in queue's release handler
parents 7b565d9d b6f2098f
block/blk-mq.c
@@ -1600,6 +1600,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
 	return NOTIFY_OK;
 }
 
+/* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
 		struct blk_mq_tag_set *set,
 		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
@@ -1618,7 +1619,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
 	blk_free_flush_queue(hctx->fq);
-	kfree(hctx->ctxs);
 	blk_mq_free_bitmap(&hctx->ctx_map);
 }
@@ -1891,8 +1891,12 @@ void blk_mq_release(struct request_queue *q)
 	unsigned int i;
 
 	/* hctx kobj stays in hctx */
-	queue_for_each_hw_ctx(q, hctx, i)
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (!hctx)
+			continue;
+		kfree(hctx->ctxs);
 		kfree(hctx);
+	}
 
 	kfree(q->queue_hw_ctx);
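Ming Lei's fix moves the kfree() of hctx->ctxs out of blk_mq_exit_hctx(),
which can run while other code still holds a reference to the queue, and
into blk_mq_release(), which only runs once the last reference drops.  As a
rough illustration of that lifetime rule, here is a minimal kref-based
sketch; struct hw_queue, hw_queue_teardown() and hw_queue_release() are
hypothetical names for this example, not kernel APIs:

	/* Sketch only: per-object memory that other code can still reach
	 * must be freed in the release callback that runs when the last
	 * reference drops, not in the earlier teardown path. */
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct hw_queue {
		struct kref ref;
		void **ctxs;		/* per-CPU table, like hctx->ctxs */
	};

	static void hw_queue_teardown(struct hw_queue *hq)
	{
		/* Quiesce the queue, but do NOT kfree(hq->ctxs) here: a
		 * holder of a remaining reference could still read it. */
	}

	static void hw_queue_release(struct kref *ref)
	{
		struct hw_queue *hq = container_of(ref, struct hw_queue, ref);

		kfree(hq->ctxs);	/* safe: no references remain */
		kfree(hq);
	}

	static void hw_queue_put(struct hw_queue *hq)
	{
		kref_put(&hq->ref, hw_queue_release);
	}

Freeing in the release callback keeps teardown safe for late readers:
anyone who can still find the object can still dereference its tables.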
block/genhd.c
@@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 	/* allocate ext devt */
 	idr_preload(GFP_KERNEL);
 
-	spin_lock(&ext_devt_lock);
+	spin_lock_bh(&ext_devt_lock);
 	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
-	spin_unlock(&ext_devt_lock);
+	spin_unlock_bh(&ext_devt_lock);
 
 	idr_preload_end();
 	if (idx < 0)
@@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt)
 		return;
 
 	if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-		spin_lock(&ext_devt_lock);
+		spin_lock_bh(&ext_devt_lock);
 		idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-		spin_unlock(&ext_devt_lock);
+		spin_unlock_bh(&ext_devt_lock);
 	}
 }
@@ -690,13 +690,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
 	} else {
 		struct hd_struct *part;
 
-		spin_lock(&ext_devt_lock);
+		spin_lock_bh(&ext_devt_lock);
 		part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
 		if (part && get_disk(part_to_disk(part))) {
 			*partno = part->partno;
 			disk = part_to_disk(part);
 		}
-		spin_unlock(&ext_devt_lock);
+		spin_unlock_bh(&ext_devt_lock);
 	}
 
 	return disk;
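Dan Williams' patch converts every process-context acquisition of
ext_devt_lock to the _bh variants because the lock is also taken on the
release path out of a softirq.  A minimal sketch of the deadlock that
lockdep warns about; demo_lock and both functions are illustrative
stand-ins, not code from the patch:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Process context: spin_lock_bh() keeps a softirq from preempting
	 * us on this CPU while we hold the lock.  Plain spin_lock() here
	 * could deadlock: the softirq below would spin forever on a lock
	 * its own CPU already holds. */
	static void process_context_path(void)
	{
		spin_lock_bh(&demo_lock);
		/* ... touch shared state ... */
		spin_unlock_bh(&demo_lock);
	}

	/* Softirq context, mirroring how ext_devt_lock is grabbed on the
	 * device release path: */
	static void softirq_path(void)
	{
		spin_lock(&demo_lock);
		/* ... touch shared state ... */
		spin_unlock(&demo_lock);
	}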
drivers/block/Kconfig
@@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX
 
 config BLK_DEV_PMEM
 	tristate "Persistent memory block device support"
+	depends on HAS_IOMEM
 	help
 	  Saying Y here will allow you to use a contiguous range of reserved
 	  memory as one or more persistent block devices.
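Richard Weinberger's one-liner keeps BLK_DEV_PMEM from being enabled on
architectures that don't select HAS_IOMEM, since the driver maps its
reserved physical range with the ioremap family of calls.  A hedged sketch
of the kind of call that breaks on such configurations; map_pmem_range()
and its parameters are illustrative, not the driver's actual code:

	#include <linux/io.h>

	static void __iomem *map_pmem_range(phys_addr_t phys, size_t size)
	{
		/* On a !HAS_IOMEM configuration this fails to build/link,
		 * which is exactly what the new "depends on HAS_IOMEM"
		 * line now rules out at Kconfig time. */
		return ioremap_nocache(phys, size);
	}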