Commit b8517e98 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "A small collection of fixes that has been gathered over the last few
  weeks.  This contains:

   - A one-liner fix for NVMe, fixing a missing list_head init that
     could make us oops on hitting recovery at load time.

   - Two small blk-mq fixes:
        - Fixup a bad goto jump on error handling.
        - Fix for oopsing if running out of reserved tags.

   - A memory leak fix for NBD.

   - Two small writeback fixes from Tejun, fixing a missing init to
     INITIAL_JIFFIES, and a possible underflow introduced recently.

   - A core merge fixup in sg gap detection, where rq->biotail was
     indexed with the count of rq->bio"

* 'for-linus' of git://git.kernel.dk/linux-block:
  writeback: fix possible underflow in write bandwidth calculation
  NVMe: Initialize device list head before starting
  Fix bug in blk_rq_merge_ok
  blkmq: Fix NULL pointer deref when all reserved tags in
  blk-mq: fix use of incorrect goto label in blk_mq_init_queue error path
  nbd: fix possible memory leak
  writeback: add missing INITIAL_JIFFIES init in global_update_bandwidth()
parents c875f421 c72efb65
...@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio) ...@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) { if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
struct bio_vec *bprev; struct bio_vec *bprev;
bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1]; bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset)) if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
return false; return false;
} }
......
...@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data, ...@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
/* /*
* We're out of tags on this hardware queue, kick any * We're out of tags on this hardware queue, kick any
* pending IO submits before going to sleep waiting for * pending IO submits before going to sleep waiting for
* some to complete. * some to complete. Note that hctx can be NULL here for
* reserved tag allocation.
*/ */
blk_mq_run_hw_queue(hctx, false); if (hctx)
blk_mq_run_hw_queue(hctx, false);
/* /*
* Retry tag allocation after running the hardware queue, * Retry tag allocation after running the hardware queue,
......
...@@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) ...@@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
*/ */
if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release, if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL)) PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto err_map; goto err_mq_usage;
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
blk_queue_rq_timeout(q, 30000); blk_queue_rq_timeout(q, 30000);
...@@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) ...@@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
blk_mq_init_cpu_queues(q, set->nr_hw_queues); blk_mq_init_cpu_queues(q, set->nr_hw_queues);
if (blk_mq_init_hw_queues(q, set)) if (blk_mq_init_hw_queues(q, set))
goto err_hw; goto err_mq_usage;
mutex_lock(&all_q_mutex); mutex_lock(&all_q_mutex);
list_add_tail(&q->all_q_node, &all_q_list); list_add_tail(&q->all_q_node, &all_q_list);
...@@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) ...@@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
return q; return q;
err_hw: err_mq_usage:
blk_cleanup_queue(q); blk_cleanup_queue(q);
err_hctxs: err_hctxs:
kfree(map); kfree(map);
......
...@@ -803,10 +803,6 @@ static int __init nbd_init(void) ...@@ -803,10 +803,6 @@ static int __init nbd_init(void)
return -EINVAL; return -EINVAL;
} }
nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
if (!nbd_dev)
return -ENOMEM;
part_shift = 0; part_shift = 0;
if (max_part > 0) { if (max_part > 0) {
part_shift = fls(max_part); part_shift = fls(max_part);
...@@ -828,6 +824,10 @@ static int __init nbd_init(void) ...@@ -828,6 +824,10 @@ static int __init nbd_init(void)
if (nbds_max > 1UL << (MINORBITS - part_shift)) if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL; return -EINVAL;
nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
if (!nbd_dev)
return -ENOMEM;
for (i = 0; i < nbds_max; i++) { for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = alloc_disk(1 << part_shift); struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk) if (!disk)
......
...@@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
} }
get_device(dev->device); get_device(dev->device);
INIT_LIST_HEAD(&dev->node);
INIT_WORK(&dev->probe_work, nvme_async_probe); INIT_WORK(&dev->probe_work, nvme_async_probe);
schedule_work(&dev->probe_work); schedule_work(&dev->probe_work);
return 0; return 0;
......
...@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi, ...@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
* bw * elapsed + write_bandwidth * (period - elapsed) * bw * elapsed + write_bandwidth * (period - elapsed)
* write_bandwidth = --------------------------------------------------- * write_bandwidth = ---------------------------------------------------
* period * period
*
* @written may have decreased due to account_page_redirty().
* Avoid underflowing @bw calculation.
*/ */
bw = written - bdi->written_stamp; bw = written - min(written, bdi->written_stamp);
bw *= HZ; bw *= HZ;
if (unlikely(elapsed > period)) { if (unlikely(elapsed > period)) {
do_div(bw, elapsed); do_div(bw, elapsed);
...@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh, ...@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh,
unsigned long now) unsigned long now)
{ {
static DEFINE_SPINLOCK(dirty_lock); static DEFINE_SPINLOCK(dirty_lock);
static unsigned long update_time; static unsigned long update_time = INITIAL_JIFFIES;
/* /*
* check locklessly first to optimize away locking for the most time * check locklessly first to optimize away locking for the most time
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment