Commit 798ce8f1 authored by Linus Torvalds

Merge branch 'for-2.6.40/core' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.40/core' of git://git.kernel.dk/linux-2.6-block: (40 commits)
  cfq-iosched: free cic_index if cfqd allocation fails
  cfq-iosched: remove unused 'group_changed' in cfq_service_tree_add()
  cfq-iosched: reduce bit operations in cfq_choose_req()
  cfq-iosched: algebraic simplification in cfq_prio_to_maxrq()
  blk-cgroup: Initialize ioc->cgroup_changed at ioc creation time
  block: move bd_set_size() above rescan_partitions() in __blkdev_get()
  block: call elv_bio_merged() when merged
  cfq-iosched: Make IO merge related stats per cpu
  cfq-iosched: Fix a memory leak of per cpu stats for root group
  backing-dev: Kill set but not used var in bdi_debug_stats_show()
  block: get rid of on-stack plugging debug checks
  blk-throttle: Make no throttling rule group processing lockless
  blk-cgroup: Make cgroup stat reset path blkg->lock free for dispatch stats
  blk-cgroup: Make 64bit per cpu stats safe on 32bit arch
  blk-throttle: Make dispatch stats per cpu
  blk-throttle: Free up a group only after one rcu grace period
  blk-throttle: Use helper function to add root throtl group to lists
  blk-throttle: Introduce a helper function to fill in device details
  blk-throttle: Dynamically allocate root group
  blk-cgroup: Allow sleeping while dynamically allocating a group
  ...
parents 22e12bbc 1547010e
@@ -142,3 +142,67 @@ Description:
		with the previous I/O request are enabled. When set to 2,
		all merge tries are disabled. The default value is 0 -
		which enables all types of merge tries.
+
+What:		/sys/block/<disk>/discard_alignment
+Date:		May 2011
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		Devices that support discard functionality may
+		internally allocate space in units that are bigger than
+		the exported logical block size. The discard_alignment
+		parameter indicates how many bytes the beginning of the
+		device is offset from the internal allocation unit's
+		natural alignment.
+
+What:		/sys/block/<disk>/<partition>/discard_alignment
+Date:		May 2011
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		Devices that support discard functionality may
+		internally allocate space in units that are bigger than
+		the exported logical block size. The discard_alignment
+		parameter indicates how many bytes the beginning of the
+		partition is offset from the internal allocation unit's
+		natural alignment.
+
+What:		/sys/block/<disk>/queue/discard_granularity
+Date:		May 2011
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		Devices that support discard functionality may
+		internally allocate space using units that are bigger
+		than the logical block size. The discard_granularity
+		parameter indicates the size of the internal allocation
+		unit in bytes if reported by the device. Otherwise the
+		discard_granularity will be set to match the device's
+		physical block size. A discard_granularity of 0 means
+		that the device does not support discard functionality.
+
+What:		/sys/block/<disk>/queue/discard_max_bytes
+Date:		May 2011
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		Devices that support discard functionality may have
+		internal limits on the number of bytes that can be
+		trimmed or unmapped in a single operation. Some storage
+		protocols also have inherent limits on the number of
+		blocks that can be described in a single command. The
+		discard_max_bytes parameter is set by the device driver
+		to the maximum number of bytes that can be discarded in
+		a single operation. Discard requests issued to the
+		device must not exceed this limit. A discard_max_bytes
+		value of 0 means that the device does not support
+		discard functionality.
+
+What:		/sys/block/<disk>/queue/discard_zeroes_data
+Date:		May 2011
+Contact:	Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+		Devices that support discard functionality may return
+		stale or random data when a previously discarded block
+		is read back. This can cause problems if the filesystem
+		expects discarded blocks to be explicitly cleared. If a
+		device reports that it deterministically returns zeroes
+		when a discarded area is read the discard_zeroes_data
+		parameter will be set to one. Otherwise it will be 0 and
+		the result of reading a discarded area is undefined.
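
The new attributes are ordinary sysfs files, so userspace can probe a device's discard limits before relying on them. Below is a minimal userspace sketch, not part of this merge: the device name "sda", the helper read_sysfs_value() and the error handling are illustrative assumptions.

/* Sketch: read the discard limits documented above for one disk. */
#include <stdio.h>

static long read_sysfs_value(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	long granularity = read_sysfs_value("/sys/block/sda/queue/discard_granularity");
	long max_bytes   = read_sysfs_value("/sys/block/sda/queue/discard_max_bytes");
	long zeroes      = read_sysfs_value("/sys/block/sda/queue/discard_zeroes_data");

	if (granularity <= 0 || max_bytes <= 0)
		printf("sda does not support discard\n");
	else
		printf("granularity=%ld bytes, max=%ld bytes per discard, zeroes_data=%ld\n",
		       granularity, max_bytes, zeroes);
	return 0;
}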
[collapsed diff; contents not shown]
@@ -14,6 +14,7 @@
  */
 #include <linux/cgroup.h>
+#include <linux/u64_stats_sync.h>

 enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
@@ -36,22 +37,15 @@ enum stat_type {
	 * request completion for IOs doen by this cgroup. This may not be
	 * accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME = 0,
-	/* Total bytes transferred */
-	BLKIO_STAT_SERVICE_BYTES,
-	/* Total IOs serviced, post merge */
-	BLKIO_STAT_SERVICED,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
-	/* Number of IOs merged */
-	BLKIO_STAT_MERGED,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,
	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
-	BLKIO_STAT_SECTORS,
+#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	BLKIO_STAT_UNACCOUNTED_TIME,
-#ifdef CONFIG_DEBUG_BLK_CGROUP
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
@@ -60,6 +54,18 @@ enum stat_type {
 #endif
 };

+/* Per cpu stats */
+enum stat_type_cpu {
+	BLKIO_STAT_CPU_SECTORS,
+	/* Total bytes transferred */
+	BLKIO_STAT_CPU_SERVICE_BYTES,
+	/* Total IOs serviced, post merge */
+	BLKIO_STAT_CPU_SERVICED,
+	/* Number of IOs merged */
+	BLKIO_STAT_CPU_MERGED,
+	BLKIO_STAT_CPU_NR
+};
+
 enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
@@ -116,11 +122,11 @@ struct blkio_cgroup {
 struct blkio_group_stats {
	/* total disk time and nr sectors dispatched by this group */
	uint64_t time;
-	uint64_t sectors;
-	/* Time not charged to this cgroup */
-	uint64_t unaccounted_time;
	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
 #ifdef CONFIG_DEBUG_BLK_CGROUP
+	/* Time not charged to this cgroup */
+	uint64_t unaccounted_time;
	/* Sum of number of IOs queued across all samples */
	uint64_t avg_queue_size_sum;
	/* Count of samples taken for average */
@@ -145,6 +151,13 @@ struct blkio_group_stats {
 #endif
 };

+/* Per cpu blkio group stats */
+struct blkio_group_stats_cpu {
+	uint64_t sectors;
+	uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
+	struct u64_stats_sync syncp;
+};
+
 struct blkio_group {
	/* An rcu protected unique identifier for the group */
	void *key;
@@ -160,6 +173,8 @@ struct blkio_group {
	/* Need to serialize the stats in the case of reset/update */
	spinlock_t stats_lock;
	struct blkio_group_stats stats;
+	/* Per cpu stats pointer */
+	struct blkio_group_stats_cpu __percpu *stats_cpu;
 };

 struct blkio_policy_node {
@@ -295,6 +310,7 @@ extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
 extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
	struct blkio_group *blkg, void *key, dev_t dev,
	enum blkio_policy_id plid);
+extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
 extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
 extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
						void *key);
@@ -322,6 +338,8 @@ static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid) {}

+static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
+
 static inline int
 blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }

...
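The per-cpu stats above are intended to be read with the u64_stats_sync seqcount helpers so that 64-bit counters stay consistent on 32-bit architectures, as the "Make 64bit per cpu stats safe on 32bit arch" commit in the shortlog describes. The sketch below only illustrates the read-side pattern; the function name blkio_read_stat_cpu_example() and the accumulation details are assumptions, the real accumulation lives in blk-cgroup.c.

/* Hedged sketch of the read side for the per-cpu stats above; the
 * separate 'sectors' field would be summed with the same loop.
 */
static uint64_t blkio_read_stat_cpu_example(struct blkio_group *blkg,
					    enum stat_type_cpu type,
					    enum stat_sub_type sub)
{
	uint64_t sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(blkg->stats_cpu, cpu);
		unsigned int start;
		uint64_t val;

		do {
			/* retry if a writer updated the counters meanwhile */
			start = u64_stats_fetch_begin(&sc->syncp);
			val = sc->stat_arr_cpu[type][sub];
		} while (u64_stats_fetch_retry(&sc->syncp, start));

		sum += val;
	}
	return sum;
}

The update side wraps its increments in u64_stats_update_begin()/u64_stats_update_end() on the local CPU, which is what makes this lockless read safe without taking blkg->stats_lock.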
@@ -569,8 +569,6 @@ int blk_get_queue(struct request_queue *q)

 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
-	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
	if (rq->cmd_flags & REQ_ELVPRIV)
		elv_put_request(q, rq);
	mempool_free(rq, q->rq.rq_pool);
@@ -1110,14 +1108,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 {
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;

-	/*
-	 * Debug stuff, kill later
-	 */
-	if (!rq_mergeable(req)) {
-		blk_dump_rq_flags(req, "back");
-		return false;
-	}
-
	if (!ll_back_merge_fn(q, req, bio))
		return false;
@@ -1132,6 +1122,7 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	drive_stat_acct(req, 0);
+	elv_bio_merged(q, req, bio);
	return true;
 }
@@ -1141,14 +1132,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
	sector_t sector;

-	/*
-	 * Debug stuff, kill later
-	 */
-	if (!rq_mergeable(req)) {
-		blk_dump_rq_flags(req, "front");
-		return false;
-	}
-
	if (!ll_front_merge_fn(q, req, bio))
		return false;
@@ -1173,6 +1156,7 @@ static bool bio_attempt_front_merge(struct request_queue *q,
	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

	drive_stat_acct(req, 0);
+	elv_bio_merged(q, req, bio);
	return true;
 }
@@ -1258,14 +1242,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)

	el_ret = elv_merge(q, &req, bio);
	if (el_ret == ELEVATOR_BACK_MERGE) {
-		BUG_ON(req->cmd_flags & REQ_ON_PLUG);
		if (bio_attempt_back_merge(q, req, bio)) {
			if (!attempt_back_merge(q, req))
				elv_merged_request(q, req, el_ret);
			goto out_unlock;
		}
	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
-		BUG_ON(req->cmd_flags & REQ_ON_PLUG);
		if (bio_attempt_front_merge(q, req, bio)) {
			if (!attempt_front_merge(q, req))
				elv_merged_request(q, req, el_ret);
@@ -1320,10 +1302,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
			if (__rq->q != q)
				plug->should_sort = 1;
		}
-		/*
-		 * Debug flag, kill later
-		 */
-		req->cmd_flags |= REQ_ON_PLUG;
		list_add_tail(&req->queuelist, &plug->list);
		drive_stat_acct(req, 1);
	} else {
@@ -1550,7 +1528,8 @@ static inline void __generic_make_request(struct bio *bio)
			goto end_io;
		}

-		blk_throtl_bio(q, &bio);
+		if (blk_throtl_bio(q, &bio))
+			goto end_io;

		/*
		 * If bio = NULL, bio has been throttled and will be submitted
@@ -2748,7 +2727,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
-		BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
		BUG_ON(!rq->q);
		if (rq->q != q) {
			/*
@@ -2760,8 +2738,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
			depth = 0;
			spin_lock(q->queue_lock);
		}
-		rq->cmd_flags &= ~REQ_ON_PLUG;
-
		/*
		 * rq is already accounted, so use raw insert
		 */

...
@@ -56,7 +56,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,

	spin_lock_irq(q->queue_lock);
	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
-	/* the queue is stopped so it won't be plugged+unplugged */
+	/* the queue is stopped so it won't be run */
	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
		q->request_fn(q);
	spin_unlock_irq(q->queue_lock);

...
@@ -212,13 +212,19 @@ static void flush_end_io(struct request *flush_rq, int error)
	}

	/*
-	 * Moving a request silently to empty queue_head may stall the
-	 * queue. Kick the queue in those cases. This function is called
-	 * from request completion path and calling directly into
-	 * request_fn may confuse the driver. Always use kblockd.
+	 * Kick the queue to avoid a stall in two cases:
+	 * 1. Moving a request silently to an empty queue_head may stall
+	 *    the queue.
+	 * 2. When a flush request is running in a non-queueable queue,
+	 *    the queue is held. Restart the queue after the flush request
+	 *    is finished to avoid a stall.
+	 * This function is called from the request completion path and
+	 * calling directly into request_fn may confuse the driver. Always
+	 * use kblockd.
	 */
-	if (queued)
+	if (queued || q->flush_queue_delayed)
		blk_run_queue_async(q);
+	q->flush_queue_delayed = 0;
 }

 /**

...
@@ -96,6 +96,9 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ret->cic_list);
		ret->ioc_data = NULL;
+#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
+		ret->cgroup_changed = 0;
+#endif
	}

	return ret;

...
@@ -9,17 +9,20 @@

 #include "blk.h"

-static void blkdev_discard_end_io(struct bio *bio, int err)
-{
-	if (err) {
-		if (err == -EOPNOTSUPP)
-			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	}
-
-	if (bio->bi_private)
-		complete(bio->bi_private);
+struct bio_batch {
+	atomic_t		done;
+	unsigned long		flags;
+	struct completion	*wait;
+};
+
+static void bio_batch_end_io(struct bio *bio, int err)
+{
+	struct bio_batch *bb = bio->bi_private;
+
+	if (err && (err != -EOPNOTSUPP))
+		clear_bit(BIO_UPTODATE, &bb->flags);
+	if (atomic_dec_and_test(&bb->done))
+		complete(bb->wait);
	bio_put(bio);
 }
@@ -41,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
+	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
@@ -67,7 +71,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		type |= REQ_SECURE;
	}

-	while (nr_sects && !ret) {
+	atomic_set(&bb.done, 1);
+	bb.flags = 1 << BIO_UPTODATE;
+	bb.wait = &wait;
+
+	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
@@ -75,9 +83,9 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		}

		bio->bi_sector = sector;
-		bio->bi_end_io = blkdev_discard_end_io;
+		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
-		bio->bi_private = &wait;
+		bio->bi_private = &bb;

		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
@@ -88,45 +96,21 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
			nr_sects = 0;
		}

-		bio_get(bio);
+		atomic_inc(&bb.done);
		submit_bio(type, bio);
+	}
+
+	/* Wait for bios in-flight */
+	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

-		if (bio_flagged(bio, BIO_EOPNOTSUPP))
-			ret = -EOPNOTSUPP;
-		else if (!bio_flagged(bio, BIO_UPTODATE))
-			ret = -EIO;
-		bio_put(bio);
-	}
+	if (!test_bit(BIO_UPTODATE, &bb.flags))
+		ret = -EIO;

	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);

-struct bio_batch
-{
-	atomic_t		done;
-	unsigned long		flags;
-	struct completion	*wait;
-};
-
-static void bio_batch_end_io(struct bio *bio, int err)
-{
-	struct bio_batch *bb = bio->bi_private;
-
-	if (err) {
-		if (err == -EOPNOTSUPP)
-			set_bit(BIO_EOPNOTSUPP, &bb->flags);
-		else
-			clear_bit(BIO_UPTODATE, &bb->flags);
-	}
-	if (bb)
-		if (atomic_dec_and_test(&bb->done))
-			complete(bb->wait);
-	bio_put(bio);
-}
-
 /**
  * blkdev_issue_zeroout - generate number of zero filed write bios
  * @bdev:	blockdev to issue
@@ -151,7 +135,6 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

-submit:
	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
@@ -168,9 +151,6 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
-			if (sz == 0)
-				/* bio has maximum size possible */
-				break;
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
@@ -190,16 +170,6 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		/* One of bios in the batch was completed with error.*/
		ret = -EIO;

-	if (ret)
-		goto out;
-
-	if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
-		ret = -EOPNOTSUPP;
-		goto out;
-	}
-	if (nr_sects != 0)
-		goto submit;
-out:
	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_zeroout);
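
With the bio_batch rework above, blkdev_issue_discard() submits all bios before waiting and reports a single error code to its caller. The sketch below shows a possible in-kernel caller; it is hedged, not taken from this merge: the function name example_trim_range(), the range handling, and the choice to ignore -EOPNOTSUPP are illustrative, while BLKDEV_DISCARD_SECURE is assumed to be the flag that selects the REQ_SECURE path shown in the hunk above.

/* Hedged sketch of a filesystem-side caller; ranges are in sectors. */
static int example_trim_range(struct block_device *bdev,
			      sector_t start, sector_t nr_sects,
			      bool secure)
{
	unsigned long flags = secure ? BLKDEV_DISCARD_SECURE : 0;
	int ret;

	ret = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, flags);
	if (ret == -EOPNOTSUPP)	/* device has no (secure) discard support */
		return 0;
	return ret;	/* -EIO here now means at least one bio failed */
}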
@@ -120,7 +120,7 @@ void blk_set_default_limits(struct queue_limits *lim)
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
-	lim->discard_zeroes_data = -1;
+	lim->discard_zeroes_data = 1;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
@@ -166,6 +166,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)

	blk_set_default_limits(&q->limits);
	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
+	q->limits.discard_zeroes_data = 0;

	/*
	 * by default assume old behaviour and bounce for any highmem page
@@ -790,6 +791,12 @@ void blk_queue_flush(struct request_queue *q, unsigned int flush)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush);

+void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
+{
+	q->flush_not_queueable = !queueable;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
+
 static int __init blk_settings_init(void)
 {
	blk_max_low_pfn = max_low_pfn - 1;

...
@@ -152,7 +152,8 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag

 static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
 {
-	return queue_var_show(q->limits.max_discard_sectors << 9, page);
+	return sprintf(page, "%llu\n",
+		       (unsigned long long)q->limits.max_discard_sectors << 9);
 }

 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)

...
[collapsed diff; contents not shown]
@@ -62,7 +62,28 @@ static inline struct request *__elv_next_request(struct request_queue *q)
			return rq;
		}

-		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+		/*
+		 * If a flush request is running and the flush request isn't
+		 * queueable in the drive, we can hold the queue until the
+		 * flush request is finished. Even if we don't do this, the
+		 * driver can't dispatch the next requests and will requeue
+		 * them, and holding the queue can improve throughput too.
+		 * For example, with requests flush1, write1, flush2: flush1
+		 * is dispatched, then the queue is held and write1 isn't
+		 * inserted into the queue. After flush1 finishes, flush2 is
+		 * dispatched. Since the disk cache is already clean, flush2
+		 * finishes very soon, so it looks as if flush2 was folded
+		 * into flush1.
+		 * Since the queue is held, a flag is set to indicate that
+		 * the queue should be restarted later. Please see
+		 * flush_end_io() for details.
+		 */
+		if (q->flush_pending_idx != q->flush_running_idx &&
+				!queue_flush_queueable(q)) {
+			q->flush_queue_delayed = 1;
+			return NULL;
+		}
+		if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
+				!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
 }

...
[collapsed diff; contents not shown]
@@ -155,13 +155,8 @@ static struct elevator_type *elevator_get(const char *name)

	e = elevator_find(name);
	if (!e) {
-		char elv[ELV_NAME_MAX + strlen("-iosched")];
		spin_unlock(&elv_list_lock);
+		request_module("%s-iosched", name);
-		snprintf(elv, sizeof(elv), "%s-iosched", name);
-		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}
@@ -421,8 +416,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
	struct list_head *entry;
	int stop_flags;

-	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
	if (q->last_merge == rq)
		q->last_merge = NULL;
@@ -661,8 +654,6 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)

	rq->q = q;

-	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||

...
@@ -1089,21 +1089,21 @@ static int atapi_drain_needed(struct request *rq)
 static int ata_scsi_dev_config(struct scsi_device *sdev,
			       struct ata_device *dev)
 {
+	struct request_queue *q = sdev->request_queue;
+
	if (!ata_id_has_unload(dev->id))
		dev->flags |= ATA_DFLAG_NO_UNLOAD;

	/* configure max sectors */
-	blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);
+	blk_queue_max_hw_sectors(q, dev->max_sectors);

	if (dev->class == ATA_DEV_ATAPI) {
-		struct request_queue *q = sdev->request_queue;
		void *buf;

		sdev->sector_size = ATA_SECT_SIZE;

		/* set DMA padding */
-		blk_queue_update_dma_pad(sdev->request_queue,
-					 ATA_DMA_PAD_SZ - 1);
+		blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);

		/* configure draining */
		buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
@@ -1131,8 +1131,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
			"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
			sdev->sector_size);

-		blk_queue_update_dma_alignment(sdev->request_queue,
-					       sdev->sector_size - 1);
+		blk_queue_update_dma_alignment(q, sdev->sector_size - 1);

		if (dev->flags & ATA_DFLAG_AN)
			set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -1145,6 +1144,8 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	}

+	blk_queue_flush_queueable(q, false);
+
	dev->sdev = sdev;
	return 0;
 }

...
@@ -320,6 +320,8 @@ static void pcd_init_units(void)
		disk->first_minor = unit;
		strcpy(disk->disk_name, cd->name);	/* umm... */
		disk->fops = &pcd_bdops;
+		disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+		disk->events = DISK_EVENT_MEDIA_CHANGE;
	}
 }

...
@@ -625,7 +625,9 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
	blk_queue_max_hw_sectors(q, 4096 / 512);
	gendisk->queue = q;
	gendisk->fops = &viocd_fops;
-	gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
+	gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
+			 GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+	gendisk->events = DISK_EVENT_MEDIA_CHANGE;
	set_capacity(gendisk, 0);
	gendisk->private_data = d;
	d->viocd_disk = gendisk;

...
@@ -1781,7 +1781,8 @@ static int ide_cd_probe(ide_drive_t *drive)

	ide_cd_read_toc(drive, &sense);
	g->fops = &idecd_ops;
-	g->flags |= GENHD_FL_REMOVABLE;
+	g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+	g->events = DISK_EVENT_MEDIA_CHANGE;
	add_disk(g);
	return 0;

...
@@ -636,7 +636,7 @@ static int sr_probe(struct device *dev)
	disk->first_minor = minor;
	sprintf(disk->disk_name, "sr%d", minor);
	disk->fops = &sr_bdops;
-	disk->flags = GENHD_FL_CD;
+	disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
	disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;

	blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);

...
@@ -1238,6 +1238,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)

	res = __blkdev_get(bdev, mode, 0);
	if (whole) {
+		struct gendisk *disk = whole->bd_disk;
+
		/* finish claiming */
		mutex_lock(&bdev->bd_mutex);
		spin_lock(&bdev_lock);
@@ -1264,15 +1266,16 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
		spin_unlock(&bdev_lock);

		/*
-		 * Block event polling for write claims. Any write
-		 * holder makes the write_holder state stick until all
-		 * are released. This is good enough and tracking
-		 * individual writeable reference is too fragile given
-		 * the way @mode is used in blkdev_get/put().
+		 * Block event polling for write claims if requested. Any
+		 * write holder makes the write_holder state stick until
+		 * all are released. This is good enough and tracking
+		 * individual writeable reference is too fragile given the
+		 * way @mode is used in blkdev_get/put().
		 */
-		if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
+		if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) &&
+		    !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
			bdev->bd_write_holder = true;
-			disk_block_events(bdev->bd_disk);
+			disk_block_events(disk);
		}

		mutex_unlock(&bdev->bd_mutex);

...
@@ -255,7 +255,11 @@ ssize_t part_discard_alignment_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
 {
	struct hd_struct *p = dev_to_part(dev);
-	return sprintf(buf, "%u\n", p->discard_alignment);
+	struct gendisk *disk = dev_to_disk(dev);
+
+	return sprintf(buf, "%u\n",
+			queue_limit_discard_alignment(&disk->queue->limits,
+							p->start_sect));
 }

 ssize_t part_stat_show(struct device *dev,
@@ -449,8 +453,6 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
	p->start_sect = start;
	p->alignment_offset =
		queue_limit_alignment_offset(&disk->queue->limits, start);
-	p->discard_alignment =
-		queue_limit_discard_alignment(&disk->queue->limits, start);
	p->nr_sects = len;
	p->partno = partno;
	p->policy = get_disk_ro(disk);

...
@@ -151,7 +151,6 @@ enum rq_flag_bits {
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
	__REQ_SECURE,		/* secure discard (used with __REQ_DISCARD) */
-	__REQ_ON_PLUG,		/* on plug list */
	__REQ_NR_BITS,		/* stops here */
 };

@@ -192,6 +191,5 @@ enum rq_flag_bits {
 #define REQ_IO_STAT		(1 << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
 #define REQ_SECURE		(1 << __REQ_SECURE)
-#define REQ_ON_PLUG		(1 << __REQ_ON_PLUG)

 #endif /* __LINUX_BLK_TYPES_H */
@@ -257,7 +257,7 @@ struct queue_limits {
	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
-	signed char		discard_zeroes_data;
+	unsigned char		discard_zeroes_data;
 };

 struct request_queue
@@ -364,6 +364,8 @@ struct request_queue
	 * for flush operations
	 */
	unsigned int		flush_flags;
+	unsigned int		flush_not_queueable:1;
+	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
@@ -843,6 +845,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
+extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
@@ -1066,13 +1069,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
 {
	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

+	if (!lim->max_discard_sectors)
+		return 0;
+
	return (lim->discard_granularity + lim->discard_alignment - alignment)
		& (lim->discard_granularity - 1);
 }

 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
 {
-	if (q->limits.discard_zeroes_data == 1)
+	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
@@ -1111,6 +1117,11 @@ static inline unsigned int block_size(struct block_device *bdev)
	return bdev->bd_block_size;
 }

+static inline bool queue_flush_queueable(struct request_queue *q)
+{
+	return !q->flush_not_queueable;
+}
+
 typedef struct {struct page *v;} Sector;

 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

...
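For reference, a worked example of the queue_limit_discard_alignment() arithmetic shown above. The numbers are made up for illustration and are not taken from a real device; with max_discard_sectors == 0 the new early return simply reports 0.

/* Worked example for the discard alignment computation above. */
#include <stdio.h>

int main(void)
{
	unsigned long long granularity = 1 << 20;	/* 1 MiB allocation unit */
	unsigned long long disc_align  = 0;		/* device-level offset */
	unsigned long long start       = 63;		/* partition start sector */

	unsigned long long alignment = (start << 9) & (granularity - 1);
	unsigned long long result =
		(granularity + disc_align - alignment) & (granularity - 1);

	/* prints 1016320: bytes from the partition start to the next
	 * allocation-unit boundary, which is what the partition's
	 * discard_alignment attribute now reports */
	printf("partition discard_alignment = %llu bytes\n", result);
	return 0;
}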
@@ -100,7 +100,6 @@ struct hd_struct {
	sector_t start_sect;
	sector_t nr_sects;
	sector_t alignment_offset;
-	unsigned int discard_alignment;
	struct device __dev;
	struct kobject *holder_dir;
	int policy, partno;
@@ -127,6 +126,7 @@ struct hd_struct {
 #define GENHD_FL_SUPPRESS_PARTITION_INFO	32
 #define GENHD_FL_EXT_DEVT			64 /* allow extended devt */
 #define GENHD_FL_NATIVE_CAPACITY		128
+#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE	256

 enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */

...
@@ -63,10 +63,10 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
-	unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
+	unsigned long nr_dirty, nr_io, nr_more_io;
	struct inode *inode;

-	nr_wb = nr_dirty = nr_io = nr_more_io = 0;
+	nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&inode_wb_list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
		nr_dirty++;

...