Commit 46040967 authored by Linus Torvalds

Merge branch 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] block: document io scheduler allow_merge_fn hook
  [PATCH] cfq-iosched: don't allow sync merges across queues
  [PATCH] Fixup blk_rq_unmap_user() API
  [PATCH] __blk_rq_unmap_user() fails to return error
  [PATCH] __blk_rq_map_user() doesn't need to grab the queue_lock
  [PATCH] Remove queue merging hooks
  [PATCH] ->nr_sectors and ->hard_nr_sectors are not used for BLOCK_PC requests
  [PATCH] cciss: fix XFER_READ/XFER_WRITE in do_cciss_request
  [PATCH] cciss: set default raid level when reading geometry fails
parents 8df8bb4a 126ec9a6
@@ -946,6 +946,13 @@ elevator_merged_fn called when a request in the scheduler has been
 				scheduler for example, to reposition the request
 				if its sorting order has changed.
 
+elevator_allow_merge_fn		called whenever the block layer determines
+				that a bio can be merged into an existing
+				request safely. The io scheduler may still
+				want to stop a merge at this point if it
+				results in some sort of conflict internally,
+				this hook allows it to do that.
+
 elevator_dispatch_fn		fills the dispatch queue with ready requests.
 				I/O schedulers are free to postpone requests by
 				not filling the dispatch queue unless @force
......
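For readers unfamiliar with the hook, a minimal sketch of an implementation follows (illustration only, not part of this commit; example_allow_merge is a hypothetical name). The elevator core treats a NULL hook as "always allow", as the block/elevator.c change further down shows, so a scheduler only needs to implement it when merges can conflict with its internal state:

	/*
	 * Hypothetical io scheduler hook: return 1 to let the block layer
	 * proceed with the bio/request merge, 0 to veto it. A scheduler
	 * with no internal per-process or per-queue state can always allow.
	 */
	static int example_allow_merge(request_queue_t *q, struct request *rq,
				       struct bio *bio)
	{
		return 1;
	}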
@@ -568,6 +568,38 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	cfq_remove_request(next);
 }
 
+static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+			   struct bio *bio)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	const int rw = bio_data_dir(bio);
+	struct cfq_queue *cfqq;
+	pid_t key;
+
+	/*
+	 * If bio is async or a write, always allow merge
+	 */
+	if (!bio_sync(bio) || rw == WRITE)
+		return 1;
+
+	/*
+	 * bio is sync. if request is not, disallow.
+	 */
+	if (!rq_is_sync(rq))
+		return 0;
+
+	/*
+	 * Ok, both bio and request are sync. Allow merge if they are
+	 * from the same queue.
+	 */
+	key = cfq_queue_pid(current, rw, 1);
+	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+	if (cfqq != RQ_CFQQ(rq))
+		return 0;
+
+	return 1;
+}
+
 static inline void
 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
@@ -2125,6 +2157,7 @@ static struct elevator_type iosched_cfq = {
 		.elevator_merge_fn =		cfq_merge,
 		.elevator_merged_fn =		cfq_merged_request,
 		.elevator_merge_req_fn =	cfq_merged_requests,
+		.elevator_allow_merge_fn =	cfq_allow_merge,
 		.elevator_dispatch_fn =		cfq_dispatch_requests,
 		.elevator_add_req_fn =		cfq_insert_request,
 		.elevator_activate_req_fn =	cfq_activate_request,
......
@@ -50,6 +50,21 @@ static const int elv_hash_shift = 6;
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
+/*
+ * Query io scheduler to see if the current process issuing bio may be
+ * merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+	request_queue_t *q = rq->q;
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_allow_merge_fn)
+		return e->ops->elevator_allow_merge_fn(q, rq, bio);
+
+	return 1;
+}
+
 /*
  * can we safely merge with this request?
  */
@@ -65,12 +80,15 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 		return 0;
 
 	/*
-	 * same device and no special stuff set, merge is ok
+	 * must be same device and not a special request
 	 */
-	if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
-		return 1;
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return 0;
 
-	return 0;
+	if (!elv_iosched_allow_merge(rq, bio))
+		return 0;
+
+	return 1;
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
......
@@ -1405,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 	return 1;
 }
 
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
-			    struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
@@ -1442,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 
 	return ll_new_hw_segment(q, req, bio);
 }
+EXPORT_SYMBOL(ll_back_merge_fn);
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
@@ -1912,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	}
 
 	q->request_fn		= rfn;
-	q->back_merge_fn	= ll_back_merge_fn;
-	q->front_merge_fn	= ll_front_merge_fn;
-	q->merge_requests_fn	= ll_merge_requests_fn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
@@ -2350,40 +2347,29 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
 
-	if (IS_ERR(bio)) {
+	if (IS_ERR(bio))
 		return PTR_ERR(bio);
-	}
 
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
 
 	/*
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
 	bio_get(bio);
 
-	/*
-	 * for most (all? don't know of any) queues we could
-	 * skip grabbing the queue lock here. only drivers with
-	 * funky private ->back_merge_fn() function could be
-	 * problematic.
-	 */
-	spin_lock_irq(q->queue_lock);
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio)) {
+	else if (!ll_back_merge_fn(q, rq, bio)) {
 		ret = -EINVAL;
-		spin_unlock_irq(q->queue_lock);
 		goto unmap_bio;
 	} else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
-		rq->nr_sectors += bio_sectors(bio);
-		rq->hard_nr_sectors = rq->nr_sectors;
 		rq->data_len += bio->bi_size;
 	}
-	spin_unlock_irq(q->queue_lock);
 
 	return bio->bi_size;
@@ -2419,6 +2405,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		    unsigned long len)
 {
 	unsigned long bytes_read = 0;
+	struct bio *bio = NULL;
 	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -2445,6 +2432,8 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
 		if (ret < 0)
 			goto unmap_rq;
+		if (!bio)
+			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
 	}
@@ -2452,7 +2441,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
-	blk_rq_unmap_user(rq);
+	blk_rq_unmap_user(bio);
 	return ret;
 }
@@ -2509,27 +2498,33 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:		rq to be unmapped
+ * @bio:	start of bio list
  *
  * Description:
- *    Unmap a rq previously mapped by blk_rq_map_user().
- *    rq->bio must be set to the original head of the request.
+ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ *    supply the original rq->bio from the blk_rq_map_user() return, since
+ *    the io completion may have changed rq->bio.
  */
-int blk_rq_unmap_user(struct request *rq)
+int blk_rq_unmap_user(struct bio *bio)
 {
-	struct bio *bio, *mapped_bio;
+	struct bio *mapped_bio;
+	int ret = 0, ret2;
 
-	while ((bio = rq->bio)) {
-		if (bio_flagged(bio, BIO_BOUNCED))
+	while (bio) {
+		mapped_bio = bio;
+		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
 			mapped_bio = bio->bi_private;
-		else
-			mapped_bio = bio;
 
-		__blk_rq_unmap_user(mapped_bio);
-		rq->bio = bio->bi_next;
-		bio_put(bio);
+		ret2 = __blk_rq_unmap_user(mapped_bio);
+		if (ret2 && !ret)
+			ret = ret2;
+
+		mapped_bio = bio;
+		bio = bio->bi_next;
+		bio_put(mapped_bio);
 	}
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
@@ -2822,7 +2817,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 	 * will have updated segment counts, update sector
 	 * counts here.
 	 */
-	if (!q->merge_requests_fn(q, req, next))
+	if (!ll_merge_requests_fn(q, req, next))
 		return 0;
 
 	/*
@@ -2939,7 +2934,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_BACK_MERGE:
 			BUG_ON(!rq_mergeable(req));
-			if (!q->back_merge_fn(q, req, bio))
+			if (!ll_back_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
@@ -2956,7 +2951,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_FRONT_MERGE:
 			BUG_ON(!rq_mergeable(req));
-			if (!q->front_merge_fn(q, req, bio))
+			if (!ll_front_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
......
@@ -333,8 +333,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 		hdr->sb_len_wr = len;
 	}
 
-	rq->bio = bio;
-	if (blk_rq_unmap_user(rq))
+	if (blk_rq_unmap_user(bio))
 		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
......
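The sg_io() change above shows the new blk_rq_unmap_user() calling convention. As a hedged sketch, a caller now follows roughly this pattern (abbreviated; disk and the surrounding setup are assumed context, not from this commit):

	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret)
		return ret;

	/* remember the original head of the bio list; io completion may
	 * advance rq->bio before the unmap runs */
	bio = rq->bio;

	blk_execute_rq(q, disk, rq, 0);

	if (blk_rq_unmap_user(bio))
		ret = -EFAULT;

The point of passing the bio instead of the request is that rq->bio can no longer be trusted after completion, so the mapped bio chain is handed back explicitly.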
@@ -1907,6 +1907,7 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
 			       "does not support reading geometry\n");
 			drv->heads = 255;
 			drv->sectors = 32;	// Sectors per track
+			drv->raid_level = RAID_UNKNOWN;
 		} else {
 			drv->heads = inq_buff->data_byte[6];
 			drv->sectors = inq_buff->data_byte[7];
@@ -2491,7 +2492,7 @@ static void do_cciss_request(request_queue_t *q)
 	c->Request.Type.Type = TYPE_CMD;	// It is a command.
 	c->Request.Type.Attribute = ATTR_SIMPLE;
 	c->Request.Type.Direction =
-	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
+	    (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
 	c->Request.Timeout = 0;	// Don't time out
 	c->Request.CDB[0] =
 	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
......
@@ -2139,8 +2139,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		cdi->last_sense = s->sense_key;
 	}
 
-	rq->bio = bio;
-	if (blk_rq_unmap_user(rq))
+	if (blk_rq_unmap_user(bio))
 		ret = -EFAULT;
 
 	if (ret)
......
@@ -265,13 +265,11 @@ static int scsi_merge_bio(struct request *rq, struct bio *bio)
 
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio))
+	else if (!ll_back_merge_fn(q, rq, bio))
 		return -EINVAL;
 	else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
-
-		rq->hard_nr_sectors += bio_sectors(bio);
-		rq->nr_sectors = rq->hard_nr_sectors;
 	}
 
 	return 0;
......
@@ -331,10 +331,6 @@ struct request_pm_state
 
 #include <linux/elevator.h>
 
-typedef int (merge_request_fn) (request_queue_t *, struct request *,
-				struct bio *);
-typedef int (merge_requests_fn) (request_queue_t *, struct request *,
-				 struct request *);
 typedef void (request_fn_proc) (request_queue_t *q);
 typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
 typedef int (prep_rq_fn) (request_queue_t *, struct request *);
@@ -376,9 +372,6 @@ struct request_queue
 	struct request_list	rq;
 
 	request_fn_proc		*request_fn;
-	merge_request_fn	*back_merge_fn;
-	merge_request_fn	*front_merge_fn;
-	merge_requests_fn	*merge_requests_fn;
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
@@ -648,6 +641,11 @@ extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 		struct gendisk *, struct scsi_ioctl_command __user *);
 
+/*
+ * Temporary export, until SCSI gets fixed up.
+ */
+extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);
+
 /*
  * A queue has just exitted congestion. Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
@@ -674,7 +672,7 @@ extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_start_queueing(request_queue_t *);
 extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
-extern int blk_rq_unmap_user(struct request *);
+extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
 			       struct sg_iovec *, int, unsigned int);
......
@@ -12,6 +12,8 @@ typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
 typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int);
 
+typedef int (elevator_allow_merge_fn) (request_queue_t *, struct request *, struct bio *);
+
 typedef int (elevator_dispatch_fn) (request_queue_t *, int);
 typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
@@ -33,6 +35,7 @@ struct elevator_ops
 	elevator_merge_fn *elevator_merge_fn;
 	elevator_merged_fn *elevator_merged_fn;
 	elevator_merge_req_fn *elevator_merge_req_fn;
+	elevator_allow_merge_fn *elevator_allow_merge_fn;
 	elevator_dispatch_fn *elevator_dispatch_fn;
 	elevator_add_req_fn *elevator_add_req_fn;
......