Commit 46040967 authored by Linus Torvalds

Merge branch 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] block: document io scheduler allow_merge_fn hook
  [PATCH] cfq-iosched: don't allow sync merges across queues
  [PATCH] Fixup blk_rq_unmap_user() API
  [PATCH] __blk_rq_unmap_user() fails to return error
  [PATCH] __blk_rq_map_user() doesn't need to grab the queue_lock
  [PATCH] Remove queue merging hooks
  [PATCH] ->nr_sectors and ->hard_nr_sectors are not used for BLOCK_PC requests
  [PATCH] cciss: fix XFER_READ/XFER_WRITE in do_cciss_request
  [PATCH] cciss: set default raid level when reading geometry fails
parents 8df8bb4a 126ec9a6
Documentation/block/biodoc.txt

@@ -946,6 +946,13 @@ elevator_merged_fn		called when a request in the scheduler has been
 				scheduler for example, to reposition the request
 				if its sorting order has changed.
 
+elevator_allow_merge_fn		called whenever the block layer determines
+				that a bio can be merged into an existing
+				request safely. The io scheduler may still
+				want to stop a merge at this point if it
+				results in some sort of conflict internally,
+				this hook allows it to do that.
+
 elevator_dispatch_fn		fills the dispatch queue with ready requests.
 				I/O schedulers are free to postpone requests by
 				not filling the dispatch queue unless @force
...
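The hook's contract is narrow: by the time it runs, the block layer has already decided the merge is mechanically safe, so the scheduler can only veto on policy grounds. As a rough illustration, here is a small userspace sketch (not kernel code; the toy_* types and the submitter field are invented for this example) of a hook with the same allow/veto shape:

#include <stdio.h>

/* Toy stand-ins for the kernel's request and bio; all fields invented. */
struct toy_bio     { int submitter; };
struct toy_request { int submitter; };

/*
 * Same shape as the documented hook: return 1 to allow the merge,
 * 0 to veto it. Mechanical safety was already checked by the caller.
 */
static int toy_allow_merge(struct toy_request *rq, struct toy_bio *bio)
{
	if (rq->submitter != bio->submitter)
		return 0;	/* veto: bio comes from a different submitter */
	return 1;
}

int main(void)
{
	struct toy_request rq = { .submitter = 1 };
	struct toy_bio ok = { .submitter = 1 }, other = { .submitter = 2 };

	printf("same submitter:  %d\n", toy_allow_merge(&rq, &ok));	/* 1 */
	printf("other submitter: %d\n", toy_allow_merge(&rq, &other));	/* 0 */
	return 0;
}

Returning 1 keeps the default behaviour; returning 0 means the bio is handled as if no merge candidate existed.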
block/cfq-iosched.c

@@ -568,6 +568,38 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	cfq_remove_request(next);
 }
 
+static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+			   struct bio *bio)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	const int rw = bio_data_dir(bio);
+	struct cfq_queue *cfqq;
+	pid_t key;
+
+	/*
+	 * If bio is async or a write, always allow merge
+	 */
+	if (!bio_sync(bio) || rw == WRITE)
+		return 1;
+
+	/*
+	 * bio is sync. if request is not, disallow.
+	 */
+	if (!rq_is_sync(rq))
+		return 0;
+
+	/*
+	 * Ok, both bio and request are sync. Allow merge if they are
+	 * from the same queue.
+	 */
+	key = cfq_queue_pid(current, rw, 1);
+	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+	if (cfqq != RQ_CFQQ(rq))
+		return 0;
+
+	return 1;
+}
+
 static inline void
 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
@@ -2125,6 +2157,7 @@ static struct elevator_type iosched_cfq = {
 		.elevator_merge_fn =		cfq_merge,
 		.elevator_merged_fn =		cfq_merged_request,
 		.elevator_merge_req_fn =	cfq_merged_requests,
+		.elevator_allow_merge_fn =	cfq_allow_merge,
 		.elevator_dispatch_fn =		cfq_dispatch_requests,
 		.elevator_add_req_fn =		cfq_insert_request,
 		.elevator_activate_req_fn =	cfq_activate_request,
...
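Stripped of the cfqd/hash lookup, cfq_allow_merge() reduces to a three-way policy decision. The standalone model below (plain C, not kernel code; queue identity is collapsed to an integer id) makes the truth table explicit:

#include <stdio.h>
#include <stdbool.h>

/*
 * Model of cfq_allow_merge()'s policy: async or write bios always
 * merge; a sync bio merges only into a sync request owned by the
 * same queue.
 */
static bool cfq_would_allow(bool bio_sync, bool bio_is_write,
			    bool rq_sync, int bio_queue, int rq_queue)
{
	if (!bio_sync || bio_is_write)
		return true;		/* async or write: always allow */
	if (!rq_sync)
		return false;		/* sync bio, async rq: disallow */
	return bio_queue == rq_queue;	/* both sync: same cfqq only */
}

int main(void)
{
	/* sync read into a sync request from another queue is refused... */
	printf("%d\n", cfq_would_allow(true, false, true, 1, 2));	/* 0 */
	/* ...but allowed when both belong to the same queue */
	printf("%d\n", cfq_would_allow(true, false, true, 1, 1));	/* 1 */
	return 0;
}

The cross-queue refusal is the point of the patch: merging a sync bio into another process's queue would let one queue consume service credited to another.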
block/elevator.c

@@ -50,6 +50,21 @@ static const int elv_hash_shift = 6;
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
+/*
+ * Query io scheduler to see if the current process issuing bio may be
+ * merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+	request_queue_t *q = rq->q;
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_allow_merge_fn)
+		return e->ops->elevator_allow_merge_fn(q, rq, bio);
+
+	return 1;
+}
+
 /*
  * can we safely merge with this request?
  */
@@ -65,12 +80,15 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 		return 0;
 
 	/*
-	 * same device and no special stuff set, merge is ok
+	 * must be same device and not a special request
 	 */
-	if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
-		return 1;
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return 0;
 
-	return 0;
+	if (!elv_iosched_allow_merge(rq, bio))
+		return 0;
+
+	return 1;
 }
 
 EXPORT_SYMBOL(elv_rq_merge_ok);
...
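Taken together, elv_rq_merge_ok() is now a gate chain: mechanical checks first, then an optional scheduler veto, with a missing hook meaning "allow". A minimal sketch of that control flow, assuming invented mrq/mbio stand-in types:

#include <stdio.h>

/* Invented stand-ins; only the control flow mirrors elv_rq_merge_ok(). */
struct mrq  { int disk; int special; };
struct mbio { int disk; };

typedef int (*allow_fn)(struct mrq *, struct mbio *);

static int rq_merge_ok(struct mrq *rq, struct mbio *bio, allow_fn allow)
{
	/* must be same device and not a special request */
	if (rq->disk != bio->disk || rq->special)
		return 0;
	/* optional scheduler veto; no hook registered means "allow" */
	if (allow && !allow(rq, bio))
		return 0;
	return 1;
}

static int veto_all(struct mrq *rq, struct mbio *bio)
{
	(void)rq; (void)bio;
	return 0;	/* a scheduler that refuses every merge */
}

int main(void)
{
	struct mrq rq = { .disk = 0, .special = 0 };
	struct mbio bio = { .disk = 0 };

	printf("no hook:   %d\n", rq_merge_ok(&rq, &bio, NULL));	/* 1 */
	printf("veto hook: %d\n", rq_merge_ok(&rq, &bio, veto_all));	/* 0 */
	return 0;
}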
block/ll_rw_blk.c

@@ -1405,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 	return 1;
 }
 
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
-			    struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
@@ -1442,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 
 	return ll_new_hw_segment(q, req, bio);
 }
+EXPORT_SYMBOL(ll_back_merge_fn);
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
@@ -1912,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	}
 
 	q->request_fn		= rfn;
-	q->back_merge_fn	= ll_back_merge_fn;
-	q->front_merge_fn	= ll_front_merge_fn;
-	q->merge_requests_fn	= ll_merge_requests_fn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
@@ -2350,40 +2347,29 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
 
-	if (IS_ERR(bio)) {
+	if (IS_ERR(bio))
 		return PTR_ERR(bio);
-	}
 
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
+
 	/*
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
 	bio_get(bio);
 
-	/*
-	 * for most (all? don't know of any) queues we could
-	 * skip grabbing the queue lock here. only drivers with
-	 * funky private ->back_merge_fn() function could be
-	 * problematic.
-	 */
-	spin_lock_irq(q->queue_lock);
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio)) {
+	else if (!ll_back_merge_fn(q, rq, bio)) {
 		ret = -EINVAL;
-		spin_unlock_irq(q->queue_lock);
 		goto unmap_bio;
 	} else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
-		rq->nr_sectors += bio_sectors(bio);
-		rq->hard_nr_sectors = rq->nr_sectors;
 		rq->data_len += bio->bi_size;
 	}
-	spin_unlock_irq(q->queue_lock);
 
 	return bio->bi_size;
@@ -2419,6 +2405,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		    unsigned long len)
 {
 	unsigned long bytes_read = 0;
+	struct bio *bio = NULL;
 	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -2445,6 +2432,8 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
 		if (ret < 0)
 			goto unmap_rq;
+		if (!bio)
+			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
 	}
@@ -2452,7 +2441,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
-	blk_rq_unmap_user(rq);
+	blk_rq_unmap_user(bio);
 	return ret;
 }
@@ -2509,27 +2498,33 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:		rq to be unmapped
+ * @bio:	start of bio list
  *
  * Description:
- *    Unmap a rq previously mapped by blk_rq_map_user().
- *    rq->bio must be set to the original head of the request.
+ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ *    supply the original rq->bio from the blk_rq_map_user() return, since
+ *    the io completion may have changed rq->bio.
  */
-int blk_rq_unmap_user(struct request *rq)
+int blk_rq_unmap_user(struct bio *bio)
 {
-	struct bio *bio, *mapped_bio;
+	struct bio *mapped_bio;
+	int ret = 0, ret2;
 
-	while ((bio = rq->bio)) {
-		if (bio_flagged(bio, BIO_BOUNCED))
+	while (bio) {
+		mapped_bio = bio;
+		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
 			mapped_bio = bio->bi_private;
-		else
-			mapped_bio = bio;
 
-		__blk_rq_unmap_user(mapped_bio);
-		rq->bio = bio->bi_next;
-		bio_put(bio);
+		ret2 = __blk_rq_unmap_user(mapped_bio);
+		if (ret2 && !ret)
+			ret = ret2;
+
+		mapped_bio = bio;
+		bio = bio->bi_next;
+		bio_put(mapped_bio);
 	}
 
-	return 0;
+	return ret;
 }
 
 EXPORT_SYMBOL(blk_rq_unmap_user);
@@ -2822,7 +2817,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 	 * will have updated segment counts, update sector
 	 * counts here.
 	 */
-	if (!q->merge_requests_fn(q, req, next))
+	if (!ll_merge_requests_fn(q, req, next))
 		return 0;
 
 	/*
@@ -2939,7 +2934,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_BACK_MERGE:
 			BUG_ON(!rq_mergeable(req));
-			if (!q->back_merge_fn(q, req, bio))
+			if (!ll_back_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
@@ -2956,7 +2951,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_FRONT_MERGE:
 			BUG_ON(!rq_mergeable(req));
-			if (!q->front_merge_fn(q, req, bio))
+			if (!ll_front_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
...
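The reworked unmap loop has two properties worth noting: it keeps walking after a failure so every bio is still put, and it reports the first error rather than swallowing it. Below is a self-contained model of that walk (ordinary malloc/free stands in for bio_put(), and the unmap_err field is invented for the demo):

#include <stdio.h>
#include <stdlib.h>

/* Toy bio: just a next pointer and a per-node "unmap" result. */
struct tbio {
	struct tbio *next;
	int unmap_err;	/* 0 on success, negative errno otherwise */
};

/*
 * Model of the new blk_rq_unmap_user() loop: walk the whole list even
 * after a failure, free every node, and report the first error seen
 * rather than silently returning 0.
 */
static int unmap_all(struct tbio *bio)
{
	int ret = 0, ret2;

	while (bio) {
		struct tbio *done = bio;

		ret2 = bio->unmap_err;
		if (ret2 && !ret)
			ret = ret2;	/* remember only the first error */

		bio = bio->next;
		free(done);		/* stands in for bio_put() */
	}
	return ret;
}

int main(void)
{
	struct tbio *c = malloc(sizeof(*c));
	struct tbio *b = malloc(sizeof(*b));
	struct tbio *a = malloc(sizeof(*a));

	c->next = NULL; c->unmap_err = 0;
	b->next = c;    b->unmap_err = -14;	/* an -EFAULT in the middle */
	a->next = b;    a->unmap_err = 0;

	printf("unmap_all -> %d\n", unmap_all(a));	/* prints -14 */
	return 0;
}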
block/scsi_ioctl.c

@@ -333,8 +333,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 			hdr->sb_len_wr = len;
 	}
 
-	rq->bio = bio;
-	if (blk_rq_unmap_user(rq))
+	if (blk_rq_unmap_user(bio))
 		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
...
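The same change lands in every in-tree caller: instead of patching rq->bio back before unmapping, callers now capture the bio head at map time and hand that to blk_rq_unmap_user(). A stubbed userspace sketch of the caller pattern (the stub_* functions and fake_* types are placeholders, not real block-layer calls):

#include <stdio.h>

struct fake_bio { int dummy; };
struct fake_rq  { struct fake_bio *bio; };

static struct fake_bio mapped;

/* Stand-ins for blk_rq_map_user()/blk_execute_rq()/blk_rq_unmap_user(). */
static int stub_map(struct fake_rq *rq)      { rq->bio = &mapped; return 0; }
static void stub_execute(struct fake_rq *rq) { rq->bio = NULL; /* completion moved it */ }
static int stub_unmap(struct fake_bio *bio)  { return bio ? 0 : -1; }

int main(void)
{
	struct fake_rq rq = { 0 };
	struct fake_bio *bio;

	if (stub_map(&rq))
		return 1;
	bio = rq.bio;		/* save the head now, not after execution */

	stub_execute(&rq);	/* rq.bio is no longer trustworthy here */

	printf("unmap(saved bio) -> %d\n", stub_unmap(bio));	/* 0 */
	printf("unmap(rq.bio)    -> %d\n", stub_unmap(rq.bio));	/* -1 */
	return 0;
}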
drivers/block/cciss.c

@@ -1907,6 +1907,7 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
 			"does not support reading geometry\n");
 		drv->heads = 255;
 		drv->sectors = 32;	// Sectors per track
+		drv->raid_level = RAID_UNKNOWN;
 	} else {
 		drv->heads = inq_buff->data_byte[6];
 		drv->sectors = inq_buff->data_byte[7];
@@ -2491,7 +2492,7 @@ static void do_cciss_request(request_queue_t *q)
 	c->Request.Type.Type = TYPE_CMD;	// It is a command.
 	c->Request.Type.Attribute = ATTR_SIMPLE;
 	c->Request.Type.Direction =
-		(rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
+		(rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
 	c->Request.Timeout = 0;	// Don't time out
 	c->Request.CDB[0] =
 		(rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
...
drivers/cdrom/cdrom.c

@@ -2139,8 +2139,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 			cdi->last_sense = s->sense_key;
 	}
 
-	rq->bio = bio;
-	if (blk_rq_unmap_user(rq))
+	if (blk_rq_unmap_user(bio))
 		ret = -EFAULT;
 
 	if (ret)
...
drivers/scsi/scsi_lib.c

@@ -265,13 +265,11 @@ static int scsi_merge_bio(struct request *rq, struct bio *bio)
 
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio))
+	else if (!ll_back_merge_fn(q, rq, bio))
 		return -EINVAL;
 	else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
-		rq->hard_nr_sectors += bio_sectors(bio);
-		rq->nr_sectors = rq->hard_nr_sectors;
 	}
 
 	return 0;
...
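scsi_merge_bio() keeps the exported ll_back_merge_fn() in the role the removed queue hook used to play, and the append itself is the classic head/tail singly linked list trick: rq->biotail makes adding a bio O(1). A toy version of that append (invented nbio/nreq types, not kernel code):

#include <stdio.h>

/* Minimal singly linked bio chain with a tail pointer, mirroring the
 * rq->bio/rq->biotail append in scsi_merge_bio(). */
struct nbio { struct nbio *bi_next; int id; };
struct nreq { struct nbio *bio, *biotail; };

static void req_append_bio(struct nreq *rq, struct nbio *bio)
{
	bio->bi_next = NULL;
	if (!rq->bio)
		rq->bio = rq->biotail = bio;	/* first bio primes the request */
	else {
		rq->biotail->bi_next = bio;	/* O(1) append via tail pointer */
		rq->biotail = bio;
	}
}

int main(void)
{
	struct nreq rq = { 0 };
	struct nbio a = { .id = 1 }, b = { .id = 2 };

	req_append_bio(&rq, &a);
	req_append_bio(&rq, &b);
	for (struct nbio *p = rq.bio; p; p = p->bi_next)
		printf("bio %d\n", p->id);
	return 0;
}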
include/linux/blkdev.h

@@ -331,10 +331,6 @@ struct request_pm_state
 
 #include <linux/elevator.h>
 
-typedef int (merge_request_fn) (request_queue_t *, struct request *,
-				struct bio *);
-typedef int (merge_requests_fn) (request_queue_t *, struct request *,
-				struct request *);
 typedef void (request_fn_proc) (request_queue_t *q);
 typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
 typedef int (prep_rq_fn) (request_queue_t *, struct request *);
@@ -376,9 +372,6 @@ struct request_queue
 	struct request_list	rq;
 
 	request_fn_proc		*request_fn;
-	merge_request_fn	*back_merge_fn;
-	merge_request_fn	*front_merge_fn;
-	merge_requests_fn	*merge_requests_fn;
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
@@ -648,6 +641,11 @@ extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 		struct gendisk *, struct scsi_ioctl_command __user *);
 
+/*
+ * Temporary export, until SCSI gets fixed up.
+ */
+extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);
+
 /*
  * A queue has just exitted congestion. Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
@@ -674,7 +672,7 @@ extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_start_queueing(request_queue_t *);
 extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
-extern int blk_rq_unmap_user(struct request *);
+extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
 			struct sg_iovec *, int, unsigned int);
...
include/linux/elevator.h

@@ -12,6 +12,8 @@ typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
 
 typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int);
 
+typedef int (elevator_allow_merge_fn) (request_queue_t *, struct request *, struct bio *);
+
 typedef int (elevator_dispatch_fn) (request_queue_t *, int);
 
 typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
@@ -33,6 +35,7 @@ struct elevator_ops
 	elevator_merge_fn *elevator_merge_fn;
 	elevator_merged_fn *elevator_merged_fn;
 	elevator_merge_req_fn *elevator_merge_req_fn;
+	elevator_allow_merge_fn *elevator_allow_merge_fn;
 
 	elevator_dispatch_fn *elevator_dispatch_fn;
 	elevator_add_req_fn *elevator_add_req_fn;
...