Commit 4c31c303 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  Add UNPLUG traces to all appropriate places
  block: fix requeue handling in blk_queue_invalidate_tags()
  mmc: Fix sg helper copy-and-paste error
  pktcdvd: fix BUG caused by sysfs module reference semantics change
  ioprio: allow sys_ioprio_set() value of 0 to reset ioprio setting
  cfq_idle_class_timer: add paranoid checks for jiffies overflow
  cfq: fix IOPRIO_CLASS_IDLE delays
  cfq: fix IOPRIO_CLASS_IDLE accounting
parents c4888f9f 2ad8b1ef
block/cfq-iosched.c

@@ -789,6 +789,20 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
 	__cfq_slice_expired(cfqd, cfqq, timed_out);
 }
 
+static int start_idle_class_timer(struct cfq_data *cfqd)
+{
+	unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+	unsigned long now = jiffies;
+
+	if (time_before(now, end) &&
+	    time_after_eq(now, cfqd->last_end_request)) {
+		mod_timer(&cfqd->idle_class_timer, end);
+		return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Get next queue for service. Unless we have a queue preemption,
  * we'll simply select the first cfqq in the service tree.
@@ -805,19 +819,14 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 	cfqq = rb_entry(n, struct cfq_queue, rb_node);
 
 	if (cfq_class_idle(cfqq)) {
-		unsigned long end;
-
 		/*
 		 * if we have idle queues and no rt or be queues had
 		 * pending requests, either allow immediate service if
 		 * the grace period has passed or arm the idle grace
 		 * timer
 		 */
-		end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-		if (time_before(jiffies, end)) {
-			mod_timer(&cfqd->idle_class_timer, end);
+		if (start_idle_class_timer(cfqd))
 			cfqq = NULL;
-		}
 	}
 
 	return cfqq;
@@ -2036,17 +2045,14 @@ static void cfq_idle_slice_timer(unsigned long data)
 static void cfq_idle_class_timer(unsigned long data)
 {
 	struct cfq_data *cfqd = (struct cfq_data *) data;
-	unsigned long flags, end;
+	unsigned long flags;
 
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
 	/*
 	 * race with a non-idle queue, reset timer
 	 */
-	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-	if (!time_after_eq(jiffies, end))
-		mod_timer(&cfqd->idle_class_timer, end);
-	else
+	if (!start_idle_class_timer(cfqd))
 		cfq_schedule_dispatch(cfqd);
 
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
@@ -2068,9 +2074,10 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
 			cfq_put_queue(cfqd->async_cfqq[0][i]);
 		if (cfqd->async_cfqq[1][i])
 			cfq_put_queue(cfqd->async_cfqq[1][i]);
-		if (cfqd->async_idle_cfqq)
-			cfq_put_queue(cfqd->async_idle_cfqq);
 	}
+
+	if (cfqd->async_idle_cfqq)
+		cfq_put_queue(cfqd->async_idle_cfqq);
 }
 
 static void cfq_exit_queue(elevator_t *e)
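Note on the hunk above: async_idle_cfqq is a single queue, but the old placement dropped its reference once per iteration of the per-priority loop, so one queue could be put many times. Hoisting the put below the loop releases it exactly once.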
@@ -2125,6 +2132,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
+	cfqd->last_end_request = jiffies;
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
 	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
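The new start_idle_class_timer() helper arms the timer only while last_end_request <= jiffies < last_end_request + CFQ_IDLE_GRACE; the extra time_after_eq() check is the "paranoid" part, keeping a stale or wrapped last_end_request from arming the timer far in the future. A minimal sketch (illustrative, not from the patch) of this wrap-safe window test:

    #include <linux/jiffies.h>

    /*
     * time_before() and time_after_eq() compare jiffies values with
     * signed arithmetic, so the test stays correct even after the
     * unsigned counter wraps around.
     */
    static int window_open(unsigned long start, unsigned long grace)
    {
            unsigned long now = jiffies;

            /* true only while start <= now < start + grace */
            return time_after_eq(now, start) && time_before(now, start + grace);
    }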
block/ll_rw_blk.c
@@ -1143,22 +1143,9 @@ EXPORT_SYMBOL(blk_queue_start_tag);
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
 	struct list_head *tmp, *n;
-	struct request *rq;
 
-	list_for_each_safe(tmp, n, &q->tag_busy_list) {
-		rq = list_entry_rq(tmp);
-
-		if (rq->tag == -1) {
-			printk(KERN_ERR
-			       "%s: bad tag found on list\n", __FUNCTION__);
-			list_del_init(&rq->queuelist);
-			rq->cmd_flags &= ~REQ_QUEUED;
-		} else
-			blk_queue_end_tag(q, rq);
-
-		rq->cmd_flags &= ~REQ_STARTED;
-		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
-	}
+	list_for_each_safe(tmp, n, &q->tag_busy_list)
+		blk_requeue_request(q, list_entry_rq(tmp));
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
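The rewrite leans on blk_requeue_request(), which already knows how to put a tagged request back on the queue; open-coding the untag-and-reinsert here mishandled requeue state. Roughly, the helper's shape in this era (a sketch from the surrounding API, not part of this diff):

    /* sketch: end the tag if the request still holds one, then hand
     * the request back to the elevator to be dispatched again */
    void blk_requeue_request(struct request_queue *q, struct request *rq)
    {
            if (blk_rq_tagged(rq))
                    blk_queue_end_tag(q, rq);

            elv_requeue_request(q, rq);
    }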
@@ -1634,15 +1621,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 {
 	struct request_queue *q = bdi->unplug_io_data;
 
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-					q->rq.count[READ] + q->rq.count[WRITE]);
-
-		q->unplug_fn(q);
-	}
+	blk_unplug(q);
 }
 
 static void blk_unplug_work(struct work_struct *work)
@@ -1666,6 +1645,20 @@ static void blk_unplug_timeout(unsigned long data)
 	kblockd_schedule_work(&q->unplug_work);
 }
 
+void blk_unplug(struct request_queue *q)
+{
+	/*
+	 * devices don't necessarily have an ->unplug_fn defined
+	 */
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+					q->rq.count[READ] + q->rq.count[WRITE]);
+
+		q->unplug_fn(q);
+	}
+}
+EXPORT_SYMBOL(blk_unplug);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
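With blk_unplug() exported, every caller that used to open-code the NULL check (and, before this merge, skipped the unplug trace entirely) collapses to a one-liner; the md and dm hunks below are all this same mechanical conversion:

    /* before: each caller duplicated the check and emitted no trace */
    if (q->unplug_fn)
            q->unplug_fn(q);

    /* after: one helper, which also emits the BLK_TA_UNPLUG_IO event */
    blk_unplug(q);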
drivers/block/pktcdvd.c
@@ -358,10 +358,19 @@ static ssize_t class_pktcdvd_store_add(struct class *c, const char *buf,
 					size_t count)
 {
 	unsigned int major, minor;
 
 	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
+		/* pkt_setup_dev() expects caller to hold reference to self */
+		if (!try_module_get(THIS_MODULE))
+			return -ENODEV;
+
 		pkt_setup_dev(MKDEV(major, minor), NULL);
+
+		module_put(THIS_MODULE);
+
 		return count;
 	}
 
 	return -EINVAL;
 }
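After the sysfs module-reference semantics change, a class-attribute store method is no longer guaranteed to run with a reference to its own module held, but pkt_setup_dev() assumes its caller holds one (per the added comment), hence the BUG. The general pattern, as a sketch (do_work() is a placeholder, not from the patch):

    /* pin our own module across a call that assumes the caller holds
     * a reference; bail out if the module is already being unloaded */
    if (!try_module_get(THIS_MODULE))
            return -ENODEV;

    do_work();

    module_put(THIS_MODULE);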
drivers/md/bitmap.c
@@ -1207,8 +1207,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
 			spin_unlock_irq(&bitmap->lock);
-			bitmap->mddev->queue
-				->unplug_fn(bitmap->mddev->queue);
+			blk_unplug(bitmap->mddev->queue);
 			schedule();
 			finish_wait(&bitmap->overflow_wait, &__wait);
 			continue;
drivers/md/dm-table.c
@@ -1000,8 +1000,7 @@ void dm_table_unplug_all(struct dm_table *t)
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
 		struct request_queue *q = bdev_get_queue(dd->bdev);
 
-		if (q->unplug_fn)
-			q->unplug_fn(q);
+		blk_unplug(q);
 	}
 }
drivers/md/linear.c
@@ -87,8 +87,7 @@ static void linear_unplug(struct request_queue *q)
 	for (i=0; i < mddev->raid_disks; i++) {
 		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
 
-		if (r_queue->unplug_fn)
-			r_queue->unplug_fn(r_queue);
+		blk_unplug(r_queue);
 	}
 }
drivers/md/md.c
@@ -5445,7 +5445,7 @@ void md_do_sync(mddev_t *mddev)
 		 * about not overloading the IO subsystem. (things like an
 		 * e2fsck being done on the RAID array should execute fast)
 		 */
-		mddev->queue->unplug_fn(mddev->queue);
+		blk_unplug(mddev->queue);
 		cond_resched();
 
 		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -5464,7 +5464,7 @@ void md_do_sync(mddev_t *mddev)
 	 * this also signals 'finished resyncing' to md_stop
 	 */
  out:
-	mddev->queue->unplug_fn(mddev->queue);
+	blk_unplug(mddev->queue);
 
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
drivers/md/multipath.c
@@ -125,8 +125,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
drivers/md/raid0.c
@@ -35,8 +35,7 @@ static void raid0_unplug(struct request_queue *q)
 	for (i=0; i<mddev->raid_disks; i++) {
 		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
 
-		if (r_queue->unplug_fn)
-			r_queue->unplug_fn(r_queue);
+		blk_unplug(r_queue);
 	}
 }
drivers/md/raid1.c
@@ -549,8 +549,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
drivers/md/raid10.c
@@ -593,8 +593,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
drivers/md/raid5.c
@@ -3186,8 +3186,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
drivers/mmc/card/queue.c
@@ -310,7 +310,7 @@ static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
 	}
 
 	if (src_size == 0) {
-		src_buf = sg_virt(dst);
+		src_buf = sg_virt(src);
 		src_size = src->length;
 	}
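The one-character fix above is a classic copy-and-paste slip: the source-refill branch mirrors the destination-refill branch, and the dst name survived the copy, so the loop re-read the destination buffer instead of advancing through the source list. A sketch of the surrounding loop shape (variable names assumed from the hunk, not the driver's exact code):

    while (size > 0) {
            if (dst_size == 0) {
                    dst_buf = sg_virt(dst);         /* refill from the dst entry */
                    dst_size = dst->length;
            }
            if (src_size == 0) {
                    src_buf = sg_virt(src);         /* the bug read sg_virt(dst) */
                    src_size = src->length;
            }
            /* copy min(dst_size, src_size) bytes, then advance both sides */
    }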
fs/ioprio.c
@@ -78,6 +78,10 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio)
 			if (!capable(CAP_SYS_ADMIN))
 				return -EPERM;
 			break;
+		case IOPRIO_CLASS_NONE:
+			if (data)
+				return -EINVAL;
+			break;
 		default:
 			return -EINVAL;
 	}
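A value of 0 encodes class IOPRIO_CLASS_NONE with priority data 0; with the new case it resets the target's io priority to the default derived from CPU nice instead of returning -EINVAL. A minimal userspace sketch (the constant mirrors linux/ioprio.h; glibc provides no wrapper for this syscall):

    #include <sys/syscall.h>
    #include <unistd.h>

    #define IOPRIO_WHO_PROCESS	1	/* mirrors linux/ioprio.h */

    int main(void)
    {
            /* ioprio 0 == IOPRIO_CLASS_NONE, data 0: with this fix the
             * kernel accepts it and clears any previously set priority */
            return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, 0) == -1;
    }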
include/linux/blkdev.h
@@ -697,6 +697,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
 extern int blk_verify_command(unsigned char *, int);
+extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {