Commit 24e1c13c authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: kill swap_io_context()
  as-iosched: fix inconsistent ioc->lock context
  ide-cd: fix leftover data BUG
  block: make elevator lib checkpatch compliant
  cfq-iosched: make checkpatch compliant
  block: make core bits checkpatch compliant
  block: new end request handling interface should take unsigned byte counts
  unexport add_disk_randomness
  block/sunvdc.c:print_version() must be __devinit
  splice: always updated atime in direct splice
parents 31fa5d28 3bc217ff
@@ -170,11 +170,11 @@ static void free_as_io_context(struct as_io_context *aic)
 
 static void as_trim(struct io_context *ioc)
 {
-        spin_lock(&ioc->lock);
+        spin_lock_irq(&ioc->lock);
         if (ioc->aic)
                 free_as_io_context(ioc->aic);
         ioc->aic = NULL;
-        spin_unlock(&ioc->lock);
+        spin_unlock_irq(&ioc->lock);
 }
 
 /* Called when the task exits */
@@ -235,10 +235,12 @@ static void as_put_io_context(struct request *rq)
 	aic = RQ_IOC(rq)->aic;
 
 	if (rq_is_sync(rq) && aic) {
-		spin_lock(&aic->lock);
+		unsigned long flags;
+
+		spin_lock_irqsave(&aic->lock, flags);
 		set_bit(AS_TASK_IORUNNING, &aic->state);
 		aic->last_end_request = jiffies;
-		spin_unlock(&aic->lock);
+		spin_unlock_irqrestore(&aic->lock, flags);
 	}
 
 	put_io_context(RQ_IOC(rq));
@@ -1266,22 +1268,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
 	 */
 	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
 		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
-			struct io_context *rioc = RQ_IOC(req);
-			struct io_context *nioc = RQ_IOC(next);
-
 			list_move(&req->queuelist, &next->queuelist);
 			rq_set_fifo_time(req, rq_fifo_time(next));
-			/*
-			 * Don't copy here but swap, because when anext is
-			 * removed below, it must contain the unused context
-			 */
-			if (rioc != nioc) {
-				double_spin_lock(&rioc->lock, &nioc->lock,
-						rioc < nioc);
-				swap_io_context(&rioc, &nioc);
-				double_spin_unlock(&rioc->lock, &nioc->lock,
-						rioc < nioc);
-			}
 		}
 	}
......
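Note (illustration only, not part of this patch): the as-iosched hunks above switch plain spin_lock()/spin_unlock() calls to the interrupt-disabling variants because the io_context locks are also taken from completion context. A minimal, self-contained sketch of the two locking patterns used there, with a hypothetical lock and data, would look roughly like this:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, for illustration */

/* Process context where interrupts are known to be enabled:
 * spin_lock_irq() disables local interrupts unconditionally,
 * spin_unlock_irq() re-enables them. */
static void touch_state_irq(void)
{
	spin_lock_irq(&example_lock);
	/* ... modify data that is also reachable from IRQ context ... */
	spin_unlock_irq(&example_lock);
}

/* Caller context unknown (interrupts may already be disabled):
 * spin_lock_irqsave() records the previous interrupt state in 'flags'
 * and spin_unlock_irqrestore() restores exactly that state. */
static void touch_state_irqsave(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... modify the same data ... */
	spin_unlock_irqrestore(&example_lock, flags);
}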
@@ -26,7 +26,8 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+		printk(KERN_ERR "%s: prepare_flush_fn required\n",
+							__FUNCTION__);
 		return -EINVAL;
 	}
 
@@ -47,7 +48,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_ordered);
 
 /*
@@ -315,5 +315,4 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 	bio_put(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blkdev_issue_flush);
This diff is collapsed.
@@ -101,5 +101,4 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 
 	return err;
 }
-
 EXPORT_SYMBOL(blk_execute_rq);
@@ -176,15 +176,6 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
 }
 EXPORT_SYMBOL(copy_io_context);
 
-void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
-{
-	struct io_context *temp;
-	temp = *ioc1;
-	*ioc1 = *ioc2;
-	*ioc2 = temp;
-}
-EXPORT_SYMBOL(swap_io_context);
-
 int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
......
@@ -53,7 +53,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+	if (!(uaddr & queue_dma_alignment(q)) &&
+	    !(len & queue_dma_alignment(q)))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -144,7 +145,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 	blk_rq_unmap_user(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
@@ -179,7 +179,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	/* we don't allow misaligned data like bio_map_user() does. If the
 	 * user is using sg, they're expected to know the alignment constraints
 	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+	bio = bio_map_user_iov(q, NULL, iov, iov_count,
+				rq_data_dir(rq) == READ);
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -194,7 +195,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
@@ -227,7 +227,6 @@ int blk_rq_unmap_user(struct bio *bio)
 
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
@@ -260,5 +259,4 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_kern);
@@ -32,7 +32,7 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	 * size, something has gone terribly wrong
 	 */
 	if (rq->nr_sectors < rq->current_nr_sectors) {
-		printk("blk: request botched\n");
+		printk(KERN_ERR "blk: request botched\n");
 		rq->nr_sectors = rq->current_nr_sectors;
 	}
 }
@@ -235,7 +235,6 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 
 	return nsegs;
 }
-
 EXPORT_SYMBOL(blk_rq_map_sg);
 
 static inline int ll_new_mergeable(struct request_queue *q,
@@ -305,8 +304,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
 	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-	    !BIOVEC_VIRT_OVERSIZE(len)) {
+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+	    && !BIOVEC_VIRT_OVERSIZE(len)) {
 		int mergeable = ll_new_mergeable(q, req, bio);
 
 		if (mergeable) {
@@ -321,7 +320,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	return ll_new_hw_segment(q, req, bio);
 }
 
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
 	unsigned short max_sectors;
@@ -388,7 +387,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 
 	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
 	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+		int len = req->biotail->bi_hw_back_size +
+				next->bio->bi_hw_front_size;
 		/*
 		 * propagate the combined length to the end of the requests
 		 */
......
@@ -10,8 +10,10 @@
 
 #include "blk.h"
 
-unsigned long blk_max_low_pfn, blk_max_pfn;
+unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
 EXPORT_SYMBOL(blk_max_pfn);
 
 /**
@@ -29,7 +31,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 {
 	q->prep_rq_fn = pfn;
 }
-
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
@@ -52,14 +53,12 @@ void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 {
 	q->merge_bvec_fn = mbfn;
 }
-
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
 	q->softirq_done_fn = fn;
 }
-
 EXPORT_SYMBOL(blk_queue_softirq_done);
 
 /**
@@ -84,7 +83,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
  * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
  * blk_queue_bounce() to create a buffer in normal memory.
  **/
-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 {
 	/*
 	 * set defaults
@@ -93,7 +92,8 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 	q->make_request_fn = mfn;
-	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+	q->backing_dev_info.ra_pages =
+			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
@@ -117,7 +117,6 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
-
 EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
@@ -133,7 +132,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
 **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
-	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
 	int dma = 0;
 
 	q->bounce_gfp = GFP_NOIO;
@@ -141,21 +140,20 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
-	if (bounce_pfn < blk_max_low_pfn)
+	if (b_pfn < blk_max_low_pfn)
 		dma = 1;
-	q->bounce_pfn = bounce_pfn;
+	q->bounce_pfn = b_pfn;
 #endif
 	if (dma) {
 		init_emergency_isa_pool();
 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-		q->bounce_pfn = bounce_pfn;
+		q->bounce_pfn = b_pfn;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /*
@@ -171,7 +169,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_sectors);
 	}
 
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -181,7 +180,6 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 		q->max_hw_sectors = max_sectors;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
 /**
@@ -199,12 +197,12 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_segments);
 	}
 
 	q->max_phys_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_phys_segments);
 
 /**
@@ -223,12 +221,12 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_segments);
 	}
 
 	q->max_hw_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_hw_segments);
 
 /**
@@ -244,12 +242,12 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_size);
 	}
 
 	q->max_segment_size = max_size;
 }
-
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
@@ -267,7 +265,6 @@ void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
 {
 	q->hardsect_size = size;
 }
-
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 
 /*
@@ -283,17 +280,16 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
-	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
-	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
-	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
-	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
-	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
+	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
 		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
@@ -332,7 +328,6 @@ int blk_queue_dma_drain(struct request_queue *q, void *buf,
 
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 
 /**
@@ -344,12 +339,12 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
+							mask);
 	}
 
 	q->seg_boundary_mask = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
 /**
@@ -366,7 +361,6 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
 	q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
 /**
@@ -390,7 +384,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 	if (mask > q->dma_alignment)
 		q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 int __init blk_settings_init(void)
......
@@ -207,12 +207,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 		    const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q;
 	ssize_t res;
 
 	if (!entry->store)
 		return -EIO;
 
+	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
 	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
 		mutex_unlock(&q->sysfs_lock);
......
@@ -21,7 +21,6 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 {
 	return blk_map_queue_find_tag(q->queue_tags, tag);
 }
-
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
@@ -99,7 +98,6 @@ void blk_queue_free_tags(struct request_queue *q)
 {
 	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_free_tags);
 
 static int
@@ -185,7 +183,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 		if (!tags)
 			goto fail;
 	} else if (q->queue_tags) {
-		if ((rc = blk_queue_resize_tags(q, depth)))
+		rc = blk_queue_resize_tags(q, depth);
+		if (rc)
 			return rc;
 		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 		return 0;
@@ -203,7 +202,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	kfree(tags);
 	return -ENOMEM;
 }
-
 EXPORT_SYMBOL(blk_queue_init_tags);
 
 /**
@@ -260,7 +258,6 @@ int blk_queue_resize_tags(struct request_queue *q, int new_depth)
 	kfree(tag_map);
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_resize_tags);
 
 /**
@@ -313,7 +310,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	clear_bit_unlock(tag, bqt->tag_map);
 	bqt->busy--;
 }
-
 EXPORT_SYMBOL(blk_queue_end_tag);
 
 /**
@@ -340,7 +336,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __FUNCTION__, rq,
 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
@@ -370,7 +366,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	bqt->busy++;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_start_tag);
 
 /**
@@ -392,5 +387,4 @@ void blk_queue_invalidate_tags(struct request_queue *q)
 	list_for_each_safe(tmp, n, &q->tag_busy_list)
 		blk_requeue_request(q, list_entry_rq(tmp));
 }
-
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
@@ -15,11 +15,13 @@
 /*
  * tunables
  */
-static const int cfq_quantum = 4; /* max queue in one round of service */
+/* max queue in one round of service */
+static const int cfq_quantum = 4;
 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
-static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
-static const int cfq_back_penalty = 2; /* penalty of a backwards seek */
+/* maximum backwards seek, in KiB */
+static const int cfq_back_max = 16 * 1024;
+/* penalty of a backwards seek */
+static const int cfq_back_penalty = 2;
 static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
@@ -37,7 +39,8 @@ static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_SLICE_SCALE (5)
 
-#define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private)
+#define RQ_CIC(rq) \
+	((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq) ((rq)->elevator_private2)
 
 static struct kmem_cache *cfq_pool;
@@ -171,15 +174,15 @@ enum cfqq_state_flags {
 #define CFQ_CFQQ_FNS(name) \
 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
 { \
-	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
+	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
 } \
 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
 { \
-	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
+	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
 } \
 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
 { \
-	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
+	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
 }
 
 CFQ_CFQQ_FNS(on_rr);
@@ -1005,7 +1008,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		/*
 		 * follow expired path, else get first next available
 		 */
-		if ((rq = cfq_check_fifo(cfqq)) == NULL)
+		rq = cfq_check_fifo(cfqq);
+		if (rq == NULL)
 			rq = cfqq->next_rq;
 
 		/*
@@ -1294,28 +1298,28 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
 	switch (ioprio_class) {
-		default:
-			printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
-		case IOPRIO_CLASS_NONE:
-			/*
-			 * no prio set, place us in the middle of the BE classes
-			 */
-			cfqq->ioprio = task_nice_ioprio(tsk);
-			cfqq->ioprio_class = IOPRIO_CLASS_BE;
-			break;
-		case IOPRIO_CLASS_RT:
-			cfqq->ioprio = task_ioprio(ioc);
-			cfqq->ioprio_class = IOPRIO_CLASS_RT;
-			break;
-		case IOPRIO_CLASS_BE:
-			cfqq->ioprio = task_ioprio(ioc);
-			cfqq->ioprio_class = IOPRIO_CLASS_BE;
-			break;
-		case IOPRIO_CLASS_IDLE:
-			cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
-			cfqq->ioprio = 7;
-			cfq_clear_cfqq_idle_window(cfqq);
-			break;
+	default:
+		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+	case IOPRIO_CLASS_NONE:
+		/*
+		 * no prio set, place us in the middle of the BE classes
+		 */
+		cfqq->ioprio = task_nice_ioprio(tsk);
+		cfqq->ioprio_class = IOPRIO_CLASS_BE;
+		break;
+	case IOPRIO_CLASS_RT:
+		cfqq->ioprio = task_ioprio(ioc);
+		cfqq->ioprio_class = IOPRIO_CLASS_RT;
+		break;
+	case IOPRIO_CLASS_BE:
+		cfqq->ioprio = task_ioprio(ioc);
+		cfqq->ioprio_class = IOPRIO_CLASS_BE;
+		break;
+	case IOPRIO_CLASS_IDLE:
+		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
+		cfqq->ioprio = 7;
+		cfq_clear_cfqq_idle_window(cfqq);
+		break;
 	}
 
 	/*
@@ -1427,7 +1431,7 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
 static struct cfq_queue **
 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 {
-	switch(ioprio_class) {
+	switch (ioprio_class) {
 	case IOPRIO_CLASS_RT:
 		return &cfqd->async_cfqq[0][ioprio];
 	case IOPRIO_CLASS_BE:
@@ -2018,7 +2022,8 @@ static void cfq_idle_slice_timer(unsigned long data)
 
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
-	if ((cfqq = cfqd->active_queue) != NULL) {
+	cfqq = cfqd->active_queue;
+	if (cfqq) {
 		timed_out = 0;
 
 		/*
@@ -2212,14 +2217,18 @@ static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
 	return ret; \
 }
 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
+		UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
+		UINT_MAX, 1);
 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
-STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
+		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
+		UINT_MAX, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
......
@@ -45,7 +45,8 @@ static LIST_HEAD(elv_list);
  */
 static const int elv_hash_shift = 6;
 #define ELV_HASH_BLOCK(sec) ((sec) >> 3)
-#define ELV_HASH_FN(sec) (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
+#define ELV_HASH_FN(sec) \
+		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES (1 << elv_hash_shift)
 #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
 #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
@@ -224,15 +225,27 @@ int elevator_init(struct request_queue *q, char *name)
 	q->end_sector = 0;
 	q->boundary_rq = NULL;
 
-	if (name && !(e = elevator_get(name)))
-		return -EINVAL;
+	if (name) {
+		e = elevator_get(name);
+		if (!e)
+			return -EINVAL;
+	}
 
-	if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
-		printk("I/O scheduler %s not found\n", chosen_elevator);
+	if (!e && *chosen_elevator) {
+		e = elevator_get(chosen_elevator);
+		if (!e)
+			printk(KERN_ERR "I/O scheduler %s not found\n",
+							chosen_elevator);
+	}
 
-	if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
-		printk("Default I/O scheduler not found, using no-op\n");
-		e = elevator_get("noop");
+	if (!e) {
+		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
+		if (!e) {
+			printk(KERN_ERR
+				"Default I/O scheduler not found. " \
+				"Using noop.\n");
+			e = elevator_get("noop");
+		}
 	}
 
 	eq = elevator_alloc(q, e);
@@ -248,7 +261,6 @@ int elevator_init(struct request_queue *q, char *name)
 	elevator_attach(q, eq, data);
 	return ret;
 }
-
 EXPORT_SYMBOL(elevator_init);
 
 void elevator_exit(elevator_t *e)
@@ -261,7 +273,6 @@ void elevator_exit(elevator_t *e)
 
 	kobject_put(&e->kobj);
 }
-
 EXPORT_SYMBOL(elevator_exit);
 
 static void elv_activate_rq(struct request_queue *q, struct request *rq)
@@ -353,7 +364,6 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
 	rb_insert_color(&rq->rb_node, root);
 	return NULL;
 }
-
 EXPORT_SYMBOL(elv_rb_add);
 
 void elv_rb_del(struct rb_root *root, struct request *rq)
@@ -362,7 +372,6 @@ void elv_rb_del(struct rb_root *root, struct request *rq)
 	rb_erase(&rq->rb_node, root);
 	RB_CLEAR_NODE(&rq->rb_node);
 }
-
 EXPORT_SYMBOL(elv_rb_del);
 
 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
@@ -383,7 +392,6 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
 
 	return NULL;
 }
-
 EXPORT_SYMBOL(elv_rb_find);
 
 /*
@@ -395,6 +403,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 {
 	sector_t boundary;
 	struct list_head *entry;
+	int stop_flags;
 
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
@@ -404,13 +413,13 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	q->nr_sorted--;
 
 	boundary = q->end_sector;
-
+	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
-		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+		if (pos->cmd_flags & stop_flags)
 			break;
 		if (rq->sector >= boundary) {
 			if (pos->sector < boundary)
@@ -425,7 +434,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 
 	list_add(&rq->queuelist, entry);
 }
-
 EXPORT_SYMBOL(elv_dispatch_sort);
 
 /*
@@ -446,7 +454,6 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
 	q->boundary_rq = rq;
 	list_add_tail(&rq->queuelist, &q->queue_head);
 }
-
 EXPORT_SYMBOL(elv_dispatch_add_tail);
 
 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
@@ -665,7 +672,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
-	} else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
+		    where == ELEVATOR_INSERT_SORT)
 		where = ELEVATOR_INSERT_BACK;
 
 	if (plug)
@@ -673,7 +681,6 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 
 	elv_insert(q, rq, where);
 }
-
 EXPORT_SYMBOL(__elv_add_request);
 
 void elv_add_request(struct request_queue *q, struct request *rq, int where,
@@ -685,7 +692,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
 	__elv_add_request(q, rq, where, plug);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(elv_add_request);
 
 static inline struct request *__elv_next_request(struct request_queue *q)
@@ -792,7 +798,6 @@ struct request *elv_next_request(struct request_queue *q)
 
 	return rq;
 }
-
 EXPORT_SYMBOL(elv_next_request);
 
 void elv_dequeue_request(struct request_queue *q, struct request *rq)
@@ -810,7 +815,6 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
 	if (blk_account_rq(rq))
 		q->in_flight++;
 }
-
 EXPORT_SYMBOL(elv_dequeue_request);
 
 int elv_queue_empty(struct request_queue *q)
@@ -825,7 +829,6 @@ int elv_queue_empty(struct request_queue *q)
 
 	return 1;
 }
-
 EXPORT_SYMBOL(elv_queue_empty);
 
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
@@ -994,7 +997,8 @@ void elv_register(struct elevator_type *e)
 				!strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
 		def = " (default)";
 
-	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def);
+	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
+								def);
 }
 EXPORT_SYMBOL_GPL(elv_register);
@@ -1126,7 +1130,8 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 	}
 
 	if (!elevator_switch(q, e))
-		printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name);
+		printk(KERN_ERR "elevator: switch to %s failed\n",
+							elevator_name);
 	return count;
 }
@@ -1160,7 +1165,6 @@ struct request *elv_rb_former_request(struct request_queue *q,
 
 	return NULL;
 }
-
 EXPORT_SYMBOL(elv_rb_former_request);
 
 struct request *elv_rb_latter_request(struct request_queue *q,
@@ -1173,5 +1177,4 @@ struct request *elv_rb_latter_request(struct request_queue *q,
 
 	return NULL;
 }
-
 EXPORT_SYMBOL(elv_rb_latter_request);
@@ -732,7 +732,7 @@ static struct vio_driver_ops vdc_vio_ops = {
 	.handshake_complete = vdc_handshake_complete,
 };
 
-static void print_version(void)
+static void __devinit print_version(void)
 {
 	static int version_printed;
......
@@ -667,8 +667,6 @@ void add_disk_randomness(struct gendisk *disk)
 	add_timer_randomness(disk->random,
 				0x100 + MKDEV(disk->major, disk->first_minor));
 }
-
-EXPORT_SYMBOL(add_disk_randomness);
 #endif
 
 #define EXTRACT_SIZE 10
......
@@ -1722,7 +1722,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 	 */
 	if ((stat & DRQ_STAT) == 0) {
 		spin_lock_irqsave(&ide_lock, flags);
-		if (__blk_end_request(rq, 0, 0))
+		if (__blk_end_request(rq, 0, rq->data_len))
 			BUG();
 		HWGROUP(drive)->rq = NULL;
 		spin_unlock_irqrestore(&ide_lock, flags);
......
@@ -1033,9 +1033,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
 
 done:
 	pipe->nrbufs = pipe->curbuf = 0;
-	if (bytes > 0)
-		file_accessed(in);
-
+	file_accessed(in);
 	return bytes;
 
 out_release:
......
@@ -39,7 +39,6 @@ void exit_io_context(void);
 struct io_context *get_io_context(gfp_t gfp_flags, int node);
 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
-void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
@@ -655,15 +654,18 @@ static inline void blk_run_address_space(struct address_space *mapping)
  * blk_end_request() for parts of the original function.
  * This prevents code duplication in drivers.
  */
-extern int blk_end_request(struct request *rq, int error, int nr_bytes);
-extern int __blk_end_request(struct request *rq, int error, int nr_bytes);
-extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
-				int bidi_bytes);
+extern int blk_end_request(struct request *rq, int error,
+				unsigned int nr_bytes);
+extern int __blk_end_request(struct request *rq, int error,
+				unsigned int nr_bytes);
+extern int blk_end_bidi_request(struct request *rq, int error,
+				unsigned int nr_bytes, unsigned int bidi_bytes);
 extern void end_request(struct request *, int);
 extern void end_queued_request(struct request *, int);
 extern void end_dequeued_request(struct request *, int);
-extern int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
-				int (drv_callback)(struct request *));
+extern int blk_end_request_callback(struct request *rq, int error,
+				unsigned int nr_bytes,
+				int (drv_callback)(struct request *));
 extern void blk_complete_request(struct request *);
 
 /*
......
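Note (illustration only, not from this merge): the blkdev.h prototypes above take the completed length as an unsigned byte count, and the ide-cd hunk passes rq->data_len so the whole request is finished. A hypothetical driver-side helper using that interface, assuming the queue lock is already held as in the ide-cd path, might look roughly like this:

#include <linux/blkdev.h>

/* Hypothetical helper for a 2.6.25-era driver: complete every remaining
 * byte of 'rq' with the given error code.  __blk_end_request() returns
 * non-zero only while the request still has bytes outstanding, which
 * should not happen when the full remaining length is passed. */
static void finish_whole_request(struct request *rq, int error)
{
	if (__blk_end_request(rq, error, rq->data_len))
		BUG();
}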