Commit 9c953d63 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A set of fixes for this series, most notably the fix for the blk-mq
  software queue regression from this merge window.

  Apart from that, a fix from Ming for an unlikely hang when a queue is
  flooded with FUA requests, and a few small fixes for nbd and
  badblocks. Lastly, a rename update for the proc softirq output, since
  the block polling code was made generic"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: update hardware and software queues for sleeping alloc
  block: flush: fix IO hang in case of flood fua req
  nbd: fix incorrect unlock of nbd->sock_lock in sock_shutdown
  badblocks: badblocks_set/clear update unacked_exist
  softirq: Display IRQ_POLL for irq-poll statistics
parents 9dcb8b68 7fe31130
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -133,6 +133,26 @@ int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
 }
 EXPORT_SYMBOL_GPL(badblocks_check);
 
+static void badblocks_update_acked(struct badblocks *bb)
+{
+	u64 *p = bb->page;
+	int i;
+	bool unacked = false;
+
+	if (!bb->unacked_exist)
+		return;
+
+	for (i = 0; i < bb->count; i++) {
+		if (!BB_ACK(p[i])) {
+			unacked = true;
+			break;
+		}
+	}
+
+	if (!unacked)
+		bb->unacked_exist = 0;
+}
+
 /**
  * badblocks_set() - Add a range of bad blocks to the table.
  * @bb:	the badblocks structure that holds all badblock information
@@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
 	bb->changed = 1;
 	if (!acknowledged)
 		bb->unacked_exist = 1;
+	else
+		badblocks_update_acked(bb);
 	write_sequnlock_irqrestore(&bb->lock, flags);
 
 	return rv;
@@ -401,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
 		}
 	}
 
+	badblocks_update_acked(bb);
 	bb->changed = 1;
 out:
 	write_sequnlock_irq(&bb->lock);
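For illustration, a minimal userspace sketch of the unacked_exist
bookkeeping this hunk fixes: the flag can only be dropped by rescanning
the whole table, exactly what badblocks_update_acked() does. The struct
and the acked array here are simplified stand-ins, not the kernel's
packed 64-bit entries.

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_BADBLOCKS 512

	struct bb_table {
		bool acked[MAX_BADBLOCKS];	/* one flag per recorded range */
		int count;
		int unacked_exist;		/* mirrors bb->unacked_exist */
	};

	/* Re-derive unacked_exist from the table, as badblocks_update_acked() does. */
	static void update_acked(struct bb_table *bb)
	{
		int i;

		if (!bb->unacked_exist)
			return;
		for (i = 0; i < bb->count; i++)
			if (!bb->acked[i])
				return;	/* still at least one unacked range */
		bb->unacked_exist = 0;
	}

	int main(void)
	{
		struct bb_table bb = { .count = 2, .unacked_exist = 1 };

		bb.acked[0] = false;
		bb.acked[1] = true;

		bb.acked[0] = true;	/* acknowledge the last unacked range... */
		update_acked(&bb);	/* ...so the flag can now be dropped */
		printf("unacked_exist = %d\n", bb.unacked_exist);	/* prints 0 */
		return 0;
	}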
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -342,6 +342,34 @@ static void flush_data_end_io(struct request *rq, int error)
 	struct request_queue *q = rq->q;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+	/*
+	 * Updating q->in_flight[] here for making this tag usable
+	 * early. Because in blk_queue_start_tag(),
+	 * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
+	 * reserve tags for sync I/O.
+	 *
+	 * More importantly, this way can avoid the following I/O
+	 * deadlock:
+	 *
+	 * - suppose there are 40 FUA requests coming to the flush queue
+	 *   and the queue depth is 31
+	 * - 30 rqs are scheduled, then blk_queue_start_tag() can't alloc
+	 *   a tag for async I/O any more
+	 * - all of the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
+	 *   and flush_data_end_io() is called
+	 * - the other rqs still can't go ahead without updating
+	 *   q->in_flight[BLK_RW_ASYNC] here; meanwhile these rqs
+	 *   are held in the flush data queue and make no progress
+	 *   toward handling the post-flush rq
+	 * - only after the post-flush rq is handled can all these rqs
+	 *   be completed
+	 */
+	elv_completed_request(q, rq);
+
+	/* for avoiding double accounting */
+	rq->cmd_flags &= ~REQ_STARTED;
+
 	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
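A toy model of the accounting deadlock described in that comment: the
numbers and the throttle rule are illustrative stand-ins for
blk_queue_start_tag()'s real logic, not the kernel code itself.

	#include <stdbool.h>
	#include <stdio.h>

	#define QUEUE_DEPTH	31
	#define SYNC_RESERVE	1	/* async I/O may not take the last tag */

	static int in_flight_async;

	/* Rough stand-in for blk_queue_start_tag()'s async throttle. */
	static bool can_start_async(void)
	{
		return in_flight_async < QUEUE_DEPTH - SYNC_RESERVE;
	}

	int main(void)
	{
		bool early_accounting = true;	/* the fix: account at data completion */
		int started = 0, total = 40;

		while (started < total && can_start_async()) {
			in_flight_async++;	/* FUA data requests grab the throttle */
			started++;
		}
		printf("started %d of %d FUA data requests\n", started, total);

		/*
		 * The started requests finish their data part and park in the
		 * flush queue waiting for the post-flush. With the fix the
		 * counter is dropped here; without it the counter stays
		 * pinned and the remaining requests never pass the throttle.
		 */
		if (early_accounting)
			in_flight_async = 0;

		printf("throttle now %s\n",
		       can_start_async() ? "open" : "blocked (deadlock)");
		return 0;
	}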
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1217,9 +1217,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
 	rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
 
-	hctx->queued++;
-	data->hctx = hctx;
-	data->ctx = ctx;
+	data->hctx = alloc_data.hctx;
+	data->ctx = alloc_data.ctx;
+	data->hctx->queued++;
 	return rq;
 }
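The pattern behind this fix, in miniature: a sleeping allocation may be
retried on another CPU, so the allocator publishes the software/hardware
queue it finally used through alloc_data, and the caller must read those
back instead of trusting its pre-sleep snapshots. The sketch below uses
illustrative names only, not kernel APIs.

	#include <stdio.h>

	struct alloc_data {
		int cpu_ctx;	/* context the allocation finally used */
	};

	static int current_cpu = 0;

	/* Pretend the allocation slept and was retried on another CPU. */
	static int blocking_alloc(struct alloc_data *data)
	{
		current_cpu = 3;		/* task migrated while sleeping */
		data->cpu_ctx = current_cpu;	/* publish the context really used */
		return 42;			/* the "request" */
	}

	int main(void)
	{
		struct alloc_data data = { .cpu_ctx = current_cpu };
		int stale_ctx = data.cpu_ctx;	/* snapshot taken before sleeping */
		int rq = blocking_alloc(&data);

		/* Wrong: stale_ctx. Right: data.cpu_ctx, as the fix does. */
		printf("rq %d: stale ctx %d, actual ctx %d\n",
		       rq, stale_ctx, data.cpu_ctx);
		return 0;
	}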
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd)
 	spin_lock(&nbd->sock_lock);
 
 	if (!nbd->sock) {
-		spin_unlock_irq(&nbd->sock_lock);
+		spin_unlock(&nbd->sock_lock);
 		return;
 	}
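A toy model of why this unlock had to match the lock: the _irq unlock
variant unconditionally re-enables interrupts, so pairing it with a
plain spin_lock() can turn interrupts back on in a context that had them
disabled. Here irqs_enabled is a stand-in for the CPU interrupt flag,
and the lock functions are simplified stand-ins, not the kernel's.

	#include <stdbool.h>
	#include <stdio.h>

	static bool irqs_enabled = false;	/* caller runs with IRQs off */
	static bool locked;

	static void spin_lock(void)       { locked = true; }
	static void spin_unlock(void)     { locked = false; }
	static void spin_unlock_irq(void) { locked = false; irqs_enabled = true; }

	int main(void)
	{
		spin_lock();
		spin_unlock_irq();	/* the bug: re-enables IRQs behind the caller's back */
		printf("after mismatched unlock: irqs_enabled=%d\n", irqs_enabled);

		irqs_enabled = false;
		spin_lock();
		spin_unlock();		/* the fix: leave the IRQ state alone */
		printf("after matched unlock:    irqs_enabled=%d\n", irqs_enabled);
		return 0;
	}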
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 const char * const softirq_to_name[NR_SOFTIRQS] = {
-	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
 };
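This rename only changes the label printed in /proc/softirqs (formerly
BLOCK_IOPOLL, now IRQ_POLL). A quick way to check on a running kernel,
using plain file I/O; nothing here is a kernel API:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[4096];
		FILE *f = fopen("/proc/softirqs", "r");

		if (!f) {
			perror("/proc/softirqs");
			return 1;
		}
		/* Print the per-CPU counters for the renamed softirq. */
		while (fgets(line, sizeof(line), f))
			if (strstr(line, "IRQ_POLL"))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}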