Commit fed678dc authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

* 'for-linus' of git://git.kernel.dk/linux-block:
  floppy: use del_timer_sync() in init cleanup
  blk-cgroup: be able to remove the record of unplugged device
  block: Don't check QUEUE_FLAG_SAME_COMP in __blk_complete_request
  mm: Add comment explaining task state setting in bdi_forker_thread()
  mm: Cleanup clearing of BDI_pending bit in bdi_forker_thread()
  block: simplify force plug flush code a little bit
  block: change force plug flush call order
  block: Fix queue_flag update when rq_affinity goes from 2 to 1
  block: separate priority boosting from REQ_META
  block: remove READ_META and WRITE_META
  xen-blkback: fixed indentation and comments
  xen-blkback: Don't disconnect backend until state switched to XenbusStateClosed.
parents 808bf29b 6c4867f6
@@ -785,10 +785,10 @@ static int blkio_policy_parse_and_set(char *buf,
 {
 	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
 	int ret;
-	unsigned long major, minor, temp;
+	unsigned long major, minor;
 	int i = 0;
 	dev_t dev;
-	u64 bps, iops;
+	u64 temp;
 
 	memset(s, 0, sizeof(s));
@@ -826,20 +826,23 @@ static int blkio_policy_parse_and_set(char *buf,
 
 	dev = MKDEV(major, minor);
 
-	ret = blkio_check_dev_num(dev);
+	ret = strict_strtoull(s[1], 10, &temp);
 	if (ret)
-		return ret;
+		return -EINVAL;
 
-	newpn->dev = dev;
+	/* For rule removal, do not check for device presence. */
+	if (temp) {
+		ret = blkio_check_dev_num(dev);
+		if (ret)
+			return ret;
+	}
 
-	if (s[1] == NULL)
-		return -EINVAL;
+	newpn->dev = dev;
 
 	switch (plid) {
 	case BLKIO_POLICY_PROP:
-		ret = strict_strtoul(s[1], 10, &temp);
-		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
-		    temp > BLKIO_WEIGHT_MAX)
+		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
+		    temp > BLKIO_WEIGHT_MAX)
 			return -EINVAL;
 
 		newpn->plid = plid;
@@ -850,26 +853,18 @@ static int blkio_policy_parse_and_set(char *buf,
 		switch(fileid) {
 		case BLKIO_THROTL_read_bps_device:
 		case BLKIO_THROTL_write_bps_device:
-			ret = strict_strtoull(s[1], 10, &bps);
-			if (ret)
-				return -EINVAL;
-
 			newpn->plid = plid;
 			newpn->fileid = fileid;
-			newpn->val.bps = bps;
+			newpn->val.bps = temp;
 			break;
 		case BLKIO_THROTL_read_iops_device:
 		case BLKIO_THROTL_write_iops_device:
-			ret = strict_strtoull(s[1], 10, &iops);
-			if (ret)
-				return -EINVAL;
-
-			if (iops > THROTL_IOPS_MAX)
+			if (temp > THROTL_IOPS_MAX)
 				return -EINVAL;
 
 			newpn->plid = plid;
 			newpn->fileid = fileid;
-			newpn->val.iops = (unsigned int)iops;
+			newpn->val.iops = (unsigned int)temp;
 			break;
 		}
 		break;
......
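Note on the blk-cgroup hunks above ("be able to remove the record of unplugged device"): a throttle rule is removed by writing the device with a value of 0, so the parser now reads the value first and only consults blkio_check_dev_num() for a non-zero value. Without the reordering, a rule recorded for a device that has since been unplugged could never be deleted, because the presence check always failed first. A minimal userspace-style sketch of the new control flow, with a stub device_present() standing in for blkio_check_dev_num():

	#include <errno.h>
	#include <stdlib.h>

	/* Stub standing in for blkio_check_dev_num(); sketch only. */
	static int device_present(unsigned int major, unsigned int minor)
	{
		(void)major; (void)minor;
		return 0;	/* 0 = present, kernel-style return code */
	}

	/* Parse the "<major>:<minor> <value>" payload; a value of 0 removes
	 * the rule, so the device need not be present for it to succeed. */
	static int parse_rule(unsigned int major, unsigned int minor,
			      const char *val_str, unsigned long long *out)
	{
		char *end;
		unsigned long long val = strtoull(val_str, &end, 10);

		if (*end != '\0')
			return -EINVAL;
		if (val) {		/* only live devices can gain a rule */
			int ret = device_present(major, minor);
			if (ret)
				return ret;
		}
		*out = val;		/* 0 = remove any existing rule */
		return 0;
	}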
@@ -1167,7 +1167,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,
  * true if merge was successful, otherwise false.
  */
 static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
-			       struct bio *bio)
+			       struct bio *bio, unsigned int *request_count)
 {
 	struct blk_plug *plug;
 	struct request *rq;
@@ -1176,10 +1176,13 @@ static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
 	plug = tsk->plug;
 	if (!plug)
 		goto out;
+	*request_count = 0;
 
 	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
 		int el_ret;
 
+		(*request_count)++;
+
 		if (rq->q != q)
 			continue;
@@ -1219,6 +1222,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	struct blk_plug *plug;
 	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
 	struct request *req;
+	unsigned int request_count = 0;
 
 	/*
 	 * low level driver can indicate that it wants pages above a
@@ -1237,7 +1241,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	 * Check if we can merge with the plugged list before grabbing
 	 * any locks.
 	 */
-	if (attempt_plug_merge(current, q, bio))
+	if (attempt_plug_merge(current, q, bio, &request_count))
 		goto out;
 
 	spin_lock_irq(q->queue_lock);
@@ -1302,11 +1306,10 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 			if (__rq->q != q)
 				plug->should_sort = 1;
 		}
+		if (request_count >= BLK_MAX_REQUEST_COUNT)
+			blk_flush_plug_list(plug, false);
 		list_add_tail(&req->queuelist, &plug->list);
-		plug->count++;
 		drive_stat_acct(req, 1);
-		if (plug->count >= BLK_MAX_REQUEST_COUNT)
-			blk_flush_plug_list(plug, false);
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
@@ -2634,7 +2637,6 @@ void blk_start_plug(struct blk_plug *plug)
 	INIT_LIST_HEAD(&plug->list);
 	INIT_LIST_HEAD(&plug->cb_list);
 	plug->should_sort = 0;
-	plug->count = 0;
 
 	/*
 	 * If this is a nested plug, don't actually assign it. It will be
@@ -2718,7 +2720,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		return;
 
 	list_splice_init(&plug->list, &list);
-	plug->count = 0;
 
 	if (plug->should_sort) {
 		list_sort(NULL, &list, plug_rq_cmp);
......
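The blk-core hunks above carry two of the plugging fixes from the series. First, attempt_plug_merge() already walks the plugged list looking for a merge candidate, so it now tallies the list length into *request_count as a side effect, and the plug->count field that blk_start_plug() and blk_flush_plug_list() had to maintain is dropped. Second, the flush moves ahead of the insertion: previously the new request was added, the counter bumped, and the whole list, new request included, was flushed at once, defeating plugging for exactly the request being submitted. A compressed, self-contained sketch of the new ordering (hypothetical struct and helper names, not kernel code):

	#include <stddef.h>

	#define BLK_MAX_REQUEST_COUNT 16  /* same limit as include/linux/blkdev.h */

	struct req { struct req *next; };
	struct plug { struct req *head; };

	/* Stand-in for blk_flush_plug_list(): hand the batch off, empty it. */
	static void flush_plug(struct plug *plug)
	{
		plug->head = NULL;	/* dispatch to the driver (elided) */
	}

	/* When the batch is full, flush the existing requests first, then
	 * add the new one, so the request being submitted still gets the
	 * benefit of plugging. request_count is the tally taken while
	 * scanning the plug list for a merge candidate. */
	static void plug_add(struct plug *plug, struct req *req,
			     unsigned int request_count)
	{
		if (request_count >= BLK_MAX_REQUEST_COUNT)
			flush_plug(plug);
		req->next = plug->head;	/* simplified list insert */
		plug->head = req;
	}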
@@ -115,7 +115,7 @@ void __blk_complete_request(struct request *req)
 	/*
 	 * Select completion CPU
 	 */
-	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) {
+	if (req->cpu != -1) {
 		ccpu = req->cpu;
 		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
 			ccpu = blk_cpu_to_group(ccpu);
......
@@ -258,11 +258,13 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 
 	ret = queue_var_store(&val, page, count);
 	spin_lock_irq(q->queue_lock);
-	if (val) {
+	if (val == 2) {
 		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
-		if (val == 2)
-			queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
-	} else {
+		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+	} else if (val == 1) {
+		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+	} else if (val == 0) {
 		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
 		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
 	}
......
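The rq_affinity fix above addresses a sysfs transition bug: with the old code, writing 1 after 2 took the if (val) branch, set QUEUE_FLAG_SAME_COMP again, and never cleared QUEUE_FLAG_SAME_FORCE, so completions stayed pinned to the submitting CPU. The store function now handles each value explicitly; values other than 0, 1 and 2 leave the flags untouched. An illustrative mapping (sketch, not kernel code):

	enum { SAME_COMP = 1 << 0, SAME_FORCE = 1 << 1 };

	static unsigned int rq_affinity_flags(unsigned long val, unsigned int old)
	{
		switch (val) {
		case 2:  return SAME_COMP | SAME_FORCE;	/* complete on the submitting CPU */
		case 1:  return SAME_COMP;		/* complete within the submitter's CPU group */
		case 0:  return 0;			/* no completion steering */
		default: return old;			/* other values leave the flags alone */
		}
	}

So writing through sysfs, e.g. echo 1 > /sys/block/sda/queue/rq_affinity (sda as an example device), now reliably drops the forced same-CPU completion after a 2 has been written.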
@@ -130,8 +130,8 @@ struct cfq_queue {
 	unsigned long slice_end;
 	long slice_resid;
 
-	/* pending metadata requests */
-	int meta_pending;
+	/* pending priority requests */
+	int prio_pending;
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
@@ -684,8 +684,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
 	if (rq_is_sync(rq1) != rq_is_sync(rq2))
 		return rq_is_sync(rq1) ? rq1 : rq2;
 
-	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META)
-		return rq1->cmd_flags & REQ_META ? rq1 : rq2;
+	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
+		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
 
 	s1 = blk_rq_pos(rq1);
 	s2 = blk_rq_pos(rq2);
@@ -1612,9 +1612,9 @@ static void cfq_remove_request(struct request *rq)
 	cfqq->cfqd->rq_queued--;
 	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
 					rq_data_dir(rq), rq_is_sync(rq));
-	if (rq->cmd_flags & REQ_META) {
-		WARN_ON(!cfqq->meta_pending);
-		cfqq->meta_pending--;
+	if (rq->cmd_flags & REQ_PRIO) {
+		WARN_ON(!cfqq->prio_pending);
+		cfqq->prio_pending--;
 	}
 }
@@ -3372,7 +3372,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * So both queues are sync. Let the new request get disk time if
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
-	if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
+	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
 		return true;
 
 	/*
@@ -3439,8 +3439,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct cfq_io_context *cic = RQ_CIC(rq);
 
 	cfqd->rq_queued++;
-	if (rq->cmd_flags & REQ_META)
-		cfqq->meta_pending++;
+	if (rq->cmd_flags & REQ_PRIO)
+		cfqq->prio_pending++;
 
 	cfq_update_io_thinktime(cfqd, cfqq, cic);
 	cfq_update_io_seektime(cfqd, cfqq, rq);
......
@@ -4250,7 +4250,7 @@ static int __init floppy_init(void)
 	use_virtual_dma = can_use_virtual_dma & 1;
 	fdc_state[0].address = FDC1;
 	if (fdc_state[0].address == -1) {
-		del_timer(&fd_timeout);
+		del_timer_sync(&fd_timeout);
 		err = -ENODEV;
 		goto out_unreg_region;
 	}
@@ -4261,7 +4261,7 @@ static int __init floppy_init(void)
 	fdc = 0;		/* reset fdc in case of unexpected interrupt */
 	err = floppy_grab_irq_and_dma();
 	if (err) {
-		del_timer(&fd_timeout);
+		del_timer_sync(&fd_timeout);
 		err = -EBUSY;
 		goto out_unreg_region;
 	}
@@ -4318,7 +4318,7 @@ static int __init floppy_init(void)
 		user_reset_fdc(-1, FD_RESET_ALWAYS, false);
 	}
 	fdc = 0;
-	del_timer(&fd_timeout);
+	del_timer_sync(&fd_timeout);
 	current_drive = 0;
 	initialized = true;
 	if (have_no_fdc) {
@@ -4368,7 +4368,7 @@ static int __init floppy_init(void)
 	unregister_blkdev(FLOPPY_MAJOR, "fd");
 out_put_disk:
 	while (dr--) {
-		del_timer(&motor_off_timer[dr]);
+		del_timer_sync(&motor_off_timer[dr]);
 		if (disks[dr]->queue)
 			blk_cleanup_queue(disks[dr]->queue);
 		put_disk(disks[dr]);
......
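On the floppy hunks: del_timer() only deactivates a pending timer, and the handler may still be running on another CPU when it returns; del_timer_sync() additionally waits for a running handler to finish. These init-failure and cleanup paths go on to unregister and free state that the fd_timeout and motor-off handlers touch, so the synchronous variant closes a small use-after-free window. The general pattern, as a sketch rather than the driver code:

	#include <linux/slab.h>
	#include <linux/timer.h>

	/* Sketch of the general pattern, not the floppy driver itself. */
	static void example_teardown(struct timer_list *timer, void *resource)
	{
		del_timer_sync(timer);	/* deactivate AND wait for a running handler */
		kfree(resource);	/* nothing can still dereference it now */
	}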
@@ -46,7 +46,7 @@
 
 #define DRV_PFX "xen-blkback:"
 #define DPRINTK(fmt, args...)				\
-	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",	\
-		__func__, __LINE__, ##args)
+	pr_debug(DRV_PFX "(%s:%d) " fmt ".\n",		\
+		 __func__, __LINE__, ##args)
......
@@ -590,7 +590,7 @@ static void frontend_changed(struct xenbus_device *dev,
 
 		/*
 		 * Enforce precondition before potential leak point.
-		 * blkif_disconnect() is idempotent.
+		 * xen_blkif_disconnect() is idempotent.
 		 */
 		xen_blkif_disconnect(be->blkif);
@@ -601,17 +601,17 @@ static void frontend_changed(struct xenbus_device *dev,
 		break;
 
 	case XenbusStateClosing:
-		xen_blkif_disconnect(be->blkif);
 		xenbus_switch_state(dev, XenbusStateClosing);
 		break;
 
 	case XenbusStateClosed:
+		xen_blkif_disconnect(be->blkif);
 		xenbus_switch_state(dev, XenbusStateClosed);
 		if (xenbus_dev_is_online(dev))
 			break;
 		/* fall through if not online */
 	case XenbusStateUnknown:
-		/* implies blkif_disconnect() via blkback_remove() */
+		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
 		device_unregister(&dev->dev);
 		break;
......
@@ -926,6 +926,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 	/*
 	 * Reliable writes are used to implement Forced Unit Access and
 	 * REQ_META accesses, and are supported only on MMCs.
+	 *
+	 * XXX: this really needs a good explanation of why REQ_META
+	 * is treated special.
 	 */
 	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
 			  (req->cmd_flags & REQ_META)) &&
......
@@ -1134,7 +1134,7 @@ struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
 		return bh;
 	if (buffer_uptodate(bh))
 		return bh;
-	ll_rw_block(READ_META, 1, &bh);
+	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
 		return bh;
@@ -2807,7 +2807,7 @@ static int __ext3_get_inode_loc(struct inode *inode,
 		trace_ext3_load_inode(inode);
 		get_bh(bh);
 		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(READ_META, bh);
+		submit_bh(READ | REQ_META | REQ_PRIO, bh);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
 			ext3_error(inode->i_sb, "ext3_get_inode_loc",
......
@@ -922,7 +922,8 @@ static struct buffer_head *ext3_find_entry(struct inode *dir,
 				bh = ext3_getblk(NULL, dir, b++, 0, &err);
 				bh_use[ra_max] = bh;
 				if (bh)
-					ll_rw_block(READ_META, 1, &bh);
+					ll_rw_block(READ | REQ_META | REQ_PRIO,
+						    1, &bh);
 			}
 		}
 		if ((bh = bh_use[ra_ptr++]) == NULL)
......
@@ -647,7 +647,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
 		return bh;
 	if (buffer_uptodate(bh))
 		return bh;
-	ll_rw_block(READ_META, 1, &bh);
+	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
 		return bh;
@@ -3298,7 +3298,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
 		trace_ext4_load_inode(inode);
 		get_bh(bh);
 		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(READ_META, bh);
+		submit_bh(READ | REQ_META | REQ_PRIO, bh);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
 			EXT4_ERROR_INODE_BLOCK(inode, block,
......
@@ -922,7 +922,8 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
 				bh = ext4_getblk(NULL, dir, b++, 0, &err);
 				bh_use[ra_max] = bh;
 				if (bh)
-					ll_rw_block(READ_META, 1, &bh);
+					ll_rw_block(READ | REQ_META | REQ_PRIO,
+						    1, &bh);
 			}
 		}
 		if ((bh = bh_use[ra_ptr++]) == NULL)
......
@@ -624,9 +624,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
 	bh->b_end_io = end_buffer_write_sync;
 	get_bh(bh);
 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
-		submit_bh(WRITE_SYNC | REQ_META, bh);
+		submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
 	else
-		submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
+		submit_bh(WRITE_FLUSH_FUA | REQ_META | REQ_PRIO, bh);
 	wait_on_buffer(bh);
 
 	if (!buffer_uptodate(bh))
......
@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 {
 	struct buffer_head *bh, *head;
 	int nr_underway = 0;
-	int write_op = REQ_META |
+	int write_op = REQ_META | REQ_PRIO |
 		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 
 	BUG_ON(!PageLocked(page));
@@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 	}
 	bh->b_end_io = end_buffer_read_sync;
 	get_bh(bh);
-	submit_bh(READ_SYNC | REQ_META, bh);
+	submit_bh(READ_SYNC | REQ_META | REQ_PRIO, bh);
 	if (!(flags & DIO_WAIT))
 		return 0;
@@ -435,7 +435,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 	if (buffer_uptodate(first_bh))
 		goto out;
 	if (!buffer_locked(first_bh))
-		ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
+		ll_rw_block(READ_SYNC | REQ_META | REQ_PRIO, 1, &first_bh);
 
 	dblock++;
 	extlen--;
......
@@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 
 	bio->bi_end_io = end_bio_io_page;
 	bio->bi_private = page;
-	submit_bio(READ_SYNC | REQ_META, bio);
+	submit_bio(READ_SYNC | REQ_META | REQ_PRIO, bio);
 	wait_on_page_locked(page);
 	bio_put(bio);
 	if (!PageUptodate(page)) {
......
@@ -709,7 +709,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 		set_buffer_uptodate(bh);
 
 	if (!buffer_uptodate(bh)) {
-		ll_rw_block(READ_META, 1, &bh);
+		ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh))
 			goto unlock_out;
......
@@ -124,6 +124,7 @@ enum rq_flag_bits {
 
 	__REQ_SYNC,		/* request is sync (sync write or read) */
 	__REQ_META,		/* metadata io request */
+	__REQ_PRIO,		/* boost priority in cfq */
 	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SECURE,		/* secure discard (used with __REQ_DISCARD) */
@@ -161,14 +162,15 @@ enum rq_flag_bits {
 #define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
 #define REQ_SYNC		(1 << __REQ_SYNC)
 #define REQ_META		(1 << __REQ_META)
+#define REQ_PRIO		(1 << __REQ_PRIO)
 #define REQ_DISCARD		(1 << __REQ_DISCARD)
 #define REQ_NOIDLE		(1 << __REQ_NOIDLE)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
-	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
-	 REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
+	 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 #define REQ_RAHEAD		(1 << __REQ_RAHEAD)
......
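The blk_types.h hunk above is the core of the REQ_META/REQ_PRIO split: REQ_META goes back to being a pure annotation (tracing, accounting, and the MMC reliable-write handling seen earlier), while REQ_PRIO is the bit cfq actually boosts, as the cfq-iosched hunks show. The fs.h hunk below removes the READ_META/WRITE_META shorthands, so filesystems that want both behaviours, as ext3, ext4 and gfs2 do for synchronous metadata reads, now set both bits explicitly. A sketch of the resulting usage pattern, mirroring the fs hunks:

	#include <linux/buffer_head.h>
	#include <linux/fs.h>

	/* Sketch: a synchronous, priority-boosted metadata read after this
	 * series. Metadata I/O that should not be boosted keeps REQ_META
	 * alone. */
	static int read_meta_block(struct buffer_head *bh)
	{
		ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
		wait_on_buffer(bh);
		return buffer_uptodate(bh) ? 0 : -EIO;
	}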
@@ -873,7 +873,6 @@ struct blk_plug {
 	struct list_head list;
 	struct list_head cb_list;
 	unsigned int should_sort;
-	unsigned int count;
 };
 #define BLK_MAX_REQUEST_COUNT 16
......
@@ -162,10 +162,8 @@ struct inodes_stat_t {
 #define READA			RWA_MASK
 
 #define READ_SYNC		(READ | REQ_SYNC)
-#define READ_META		(READ | REQ_META)
 #define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE)
 #define WRITE_ODIRECT		(WRITE | REQ_SYNC)
-#define WRITE_META		(WRITE | REQ_META)
 #define WRITE_FLUSH		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
 #define WRITE_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
 #define WRITE_FLUSH_FUA	(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
......
@@ -359,6 +359,17 @@ static unsigned long bdi_longest_inactive(void)
 	return max(5UL * 60 * HZ, interval);
 }
 
+/*
+ * Clear pending bit and wakeup anybody waiting for flusher thread creation or
+ * shutdown
+ */
+static void bdi_clear_pending(struct backing_dev_info *bdi)
+{
+	clear_bit(BDI_pending, &bdi->state);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&bdi->state, BDI_pending);
+}
+
 static int bdi_forker_thread(void *ptr)
 {
 	struct bdi_writeback *me = ptr;
@@ -390,6 +401,13 @@ static int bdi_forker_thread(void *ptr)
 		}
 
 		spin_lock_bh(&bdi_lock);
+
+		/*
+		 * In the following loop we are going to check whether we have
+		 * some work to do without any synchronization with tasks
+		 * waking us up to do work for them. So we have to set task
+		 * state already here so that we don't miss wakeups coming
+		 * after we verify some condition.
+		 */
 		set_current_state(TASK_INTERRUPTIBLE);
 
 		list_for_each_entry(bdi, &bdi_list, bdi_list) {
@@ -469,11 +487,13 @@ static int bdi_forker_thread(void *ptr)
 				spin_unlock_bh(&bdi->wb_lock);
 				wake_up_process(task);
 			}
+			bdi_clear_pending(bdi);
 			break;
 
 		case KILL_THREAD:
 			__set_current_state(TASK_RUNNING);
 			kthread_stop(task);
+			bdi_clear_pending(bdi);
 			break;
 
 		case NO_ACTION:
@@ -489,16 +509,8 @@ static int bdi_forker_thread(void *ptr)
 			else
 				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
 			try_to_freeze();
-			/* Back to the main loop */
-			continue;
+			break;
 		}
-
-		/*
-		 * Clear pending bit and wakeup anybody waiting to tear us down.
-		 */
-		clear_bit(BDI_pending, &bdi->state);
-		smp_mb__after_clear_bit();
-		wake_up_bit(&bdi->state, BDI_pending);
 	}
 
 	return 0;
......
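The backing-dev changes pair a comment with a cleanup. The new comment documents the classic no-lost-wakeup protocol: set_current_state(TASK_INTERRUPTIBLE) must happen before the work checks, so a wakeup arriving between the check and schedule() turns the sleep into a no-op instead of being missed. Factoring out bdi_clear_pending() lets the FORK_THREAD and KILL_THREAD arms clear BDI_pending exactly where the work completes, instead of falling through to shared clearing code at the bottom of the loop that the NO_ACTION arm had to skip with a continue. A sketch of the waiter side this pairs with (based on bdi_wb_shutdown() in the same file; the barrier orders clear_bit() before the wait-queue scan in wake_up_bit(), so the sleeper below cannot miss the transition):

	/* Block until the forker thread has finished creating or killing
	 * this bdi's flusher thread. */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);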