Commit 78daa87b authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  cciss: fix build for PROC_FS disabled
  block: fix amiga and atari floppy driver compile warning
  blk-throttle: Fix calculation of max number of WRITES to be dispatched
  ioprio: grab rcu_read_lock in sys_ioprio_{set,get}()
  xen/blkfront: cope with backend that fail empty BLKIF_OP_WRITE_BARRIER requests
  xen/blkfront: Implement FUA with BLKIF_OP_WRITE_BARRIER
  xen/blkfront: change blk_shadow.request to proper pointer
  xen/blkfront: map REQ_FLUSH into a full barrier
parents d4d2ad94 bbe425cd
block/blk-throttle.c

```diff
@@ -645,7 +645,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
 {
         unsigned int nr_reads = 0, nr_writes = 0;
         unsigned int max_nr_reads = throtl_grp_quantum*3/4;
-        unsigned int max_nr_writes = throtl_grp_quantum - nr_reads;
+        unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
         struct bio *bio;
 
         /* Try to dispatch 75% READS and 25% WRITES */
```
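The write budget was computed from `nr_reads`, which is still zero at that point, so writes could consume the entire quantum instead of the 25% left over after reads. A minimal userspace sketch of the corrected arithmetic (the quantum value 8 is an assumption standing in for `throtl_grp_quantum`):

```c
#include <stdio.h>

#define QUANTUM 8        /* assumed stand-in for throtl_grp_quantum */

int main(void)
{
        unsigned int nr_reads = 0;                        /* nothing dispatched yet */
        unsigned int max_nr_reads = QUANTUM * 3 / 4;      /* 75% of the budget: 6 */
        unsigned int old_writes = QUANTUM - nr_reads;     /* buggy: 8, the whole quantum */
        unsigned int new_writes = QUANTUM - max_nr_reads; /* fixed: 2, the remaining 25% */

        printf("reads %u, writes before fix %u, after fix %u\n",
               max_nr_reads, old_writes, new_writes);
        return 0;
}
```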
drivers/block/amiflop.c

```diff
@@ -1341,7 +1341,7 @@ static struct request *set_next_request(void)
 {
         struct request_queue *q;
         int cnt = FD_MAX_UNITS;
-        struct request *rq;
+        struct request *rq = NULL;
 
         /* Find next queue we can dispatch from */
         fdc_queue = fdc_queue + 1;
```
drivers/block/ataflop.c

```diff
@@ -1399,7 +1399,7 @@ static struct request *set_next_request(void)
 {
         struct request_queue *q;
         int old_pos = fdc_queue;
-        struct request *rq;
+        struct request *rq = NULL;
 
         do {
                 q = unit[fdc_queue].disk->queue;
```
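Both hunks silence the same "may be used uninitialized" compile warning, and it points at a real bug: if no unit has a pending request, the scan loop never assigns `rq` and the function returns an indeterminate pointer. A standalone model of the pattern (the unit table and `peek_queue()` helper are invented for illustration):

```c
#include <stddef.h>

#define FD_MAX_UNITS 4

struct request;

static const int unit_present[FD_MAX_UNITS] = { 0, 1, 0, 1 };

/* Hypothetical stand-in for peeking one per-unit request queue. */
static struct request *peek_queue(int unit)
{
        (void)unit;
        return NULL;        /* pretend every queue is empty */
}

static struct request *set_next_request_model(void)
{
        struct request *rq = NULL;        /* the fix: defined even if nothing is found */
        int cnt;

        for (cnt = 0; cnt < FD_MAX_UNITS; cnt++) {
                if (!unit_present[cnt])
                        continue;        /* loop may skip every assignment to rq */
                rq = peek_queue(cnt);
                if (rq)
                        break;
        }
        return rq;        /* without the initializer this could be garbage */
}

int main(void)
{
        return set_next_request_model() != NULL;
}
```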
drivers/block/cciss.c

```diff
@@ -66,6 +66,7 @@ MODULE_VERSION("3.6.26");
 MODULE_LICENSE("GPL");
 
 static DEFINE_MUTEX(cciss_mutex);
+static struct proc_dir_entry *proc_cciss;
 
 #include "cciss_cmd.h"
 #include "cciss.h"
@@ -363,8 +364,6 @@ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 #define ENG_GIG_FACTOR (ENG_GIG/512)
 #define ENGAGE_SCSI "engage scsi"
 
-static struct proc_dir_entry *proc_cciss;
-
 static void cciss_seq_show_header(struct seq_file *seq)
 {
         ctlr_info_t *h = seq->private;
```
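With PROC_FS disabled the build failed because `proc_cciss` was declared only inside a procfs-only region of the file, leaving references in unconditionally-built code undeclared; the two hunks above hoist the declaration next to the always-present globals. A sketch of the pattern, with the `CONFIG_PROC_FS` guard and helper used purely for illustration:

```c
#include <stddef.h>

struct proc_dir_entry;

static struct proc_dir_entry *proc_cciss;        /* new home: visible in all configs */

#ifdef CONFIG_PROC_FS
/*
 * Old home: when the declaration lived only in this region, any use of
 * proc_cciss outside it became an undeclared identifier once procfs
 * was compiled out.
 */
static void cciss_procinit(void)
{
        (void)proc_cciss;
}
#endif

int main(void)
{
#ifdef CONFIG_PROC_FS
        cciss_procinit();
#endif
        return proc_cciss == NULL;        /* referenced in unconditional code */
}
```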
drivers/block/xen-blkfront.c

```diff
@@ -65,7 +65,7 @@ enum blkif_state {
 
 struct blk_shadow {
         struct blkif_request req;
-        unsigned long request;
+        struct request *request;
         unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info,
                                unsigned long id)
 {
         info->shadow[id].req.id  = info->shadow_free;
-        info->shadow[id].request = 0;
+        info->shadow[id].request = NULL;
         info->shadow_free = id;
 }
```
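Making the shadow slot a typed `struct request *` lets NULL mark a free entry and removes the `(unsigned long)` casts at every producer and consumer. A small self-contained sketch of the shadow freelist idea (`id_or_link` is a simplification of how the real code reuses `req.id` to chain free slots):

```c
#include <assert.h>
#include <stddef.h>

#define RING_SIZE 4

struct request { int data; };

struct blk_shadow {
        unsigned long id_or_link;        /* free-list chaining, cf. req.id */
        struct request *request;         /* typed pointer, NULL means free */
};

static struct blk_shadow shadow[RING_SIZE];
static unsigned long shadow_free;

static unsigned long get_id_from_freelist(void)
{
        unsigned long id = shadow_free;

        shadow_free = shadow[id].id_or_link;
        return id;
}

static void add_id_to_freelist(unsigned long id)
{
        shadow[id].id_or_link = shadow_free;
        shadow[id].request = NULL;        /* was "= 0" on an unsigned long */
        shadow_free = id;
}

int main(void)
{
        struct request rq = { 42 };
        unsigned long i, id;

        for (i = 0; i < RING_SIZE; i++)
                shadow[i].id_or_link = i + 1;

        id = get_id_from_freelist();
        shadow[id].request = &rq;        /* no cast needed anymore */
        assert(shadow[id].request->data == 42);

        add_id_to_freelist(id);
        assert(shadow[id].request == NULL);
        return 0;
}
```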
```diff
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 }
 
 /*
- * blkif_queue_request
+ * Generate a Xen blkfront IO request from a blk layer request.  Reads
+ * and writes are handled as expected.  Since we lack a loose flush
+ * request, we map flushes into a full ordered barrier.
  *
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- *  virtual address in the guest os.
+ * @req: a request struct
  */
 static int blkif_queue_request(struct request *req)
 {
```
```diff
@@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req)
         /* Fill out a communications ring structure. */
         ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
         id = get_id_from_freelist(info);
-        info->shadow[id].request = (unsigned long)req;
+        info->shadow[id].request = req;
 
         ring_req->id = id;
         ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
```
```diff
@@ -290,6 +287,18 @@ static int blkif_queue_request(struct request *req)
 
         ring_req->operation = rq_data_dir(req) ?
                 BLKIF_OP_WRITE : BLKIF_OP_READ;
+        if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+                /*
+                 * Ideally we could just do an unordered
+                 * flush-to-disk, but all we have is a full write
+                 * barrier at the moment.  However, a barrier write is
+                 * a superset of FUA, so we can implement it the same
+                 * way.  (It's also a FLUSH+FUA, since it is
+                 * guaranteed ordered WRT previous writes.)
+                 */
+                ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+        }
+
         ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
         BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
```
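Since this protocol revision has no standalone flush operation, any request carrying REQ_FLUSH or REQ_FUA is promoted to a full BLKIF_OP_WRITE_BARRIER, which is both durable and ordered against earlier writes, a superset of either flag. A compilable toy version of the mapping (the flag bits and op codes are made up, not the real kernel or blkif constants):

```c
#include <stdio.h>

#define REQ_FLUSH (1u << 0)        /* illustrative values only */
#define REQ_FUA   (1u << 1)

enum blkif_op { OP_READ, OP_WRITE, OP_WRITE_BARRIER };

static enum blkif_op map_request(unsigned int cmd_flags, int is_write)
{
        enum blkif_op op = is_write ? OP_WRITE : OP_READ;

        /* A barrier write is ordered and durable, so it covers both
         * FLUSH and FUA semantics at the cost of being stronger. */
        if (cmd_flags & (REQ_FLUSH | REQ_FUA))
                op = OP_WRITE_BARRIER;
        return op;
}

int main(void)
{
        printf("plain write -> %d\n", map_request(0, 1));
        printf("FUA write   -> %d\n", map_request(REQ_FUA, 1));
        printf("flush       -> %d\n", map_request(REQ_FLUSH, 1));
        return 0;
}
```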
```diff
@@ -634,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
                 bret = RING_GET_RESPONSE(&info->ring, i);
                 id   = bret->id;
-                req  = (struct request *)info->shadow[id].request;
+                req  = info->shadow[id].request;
 
                 blkif_completion(&info->shadow[id]);
 
```
```diff
@@ -647,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                 printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
                                        info->gd->disk_name);
                                 error = -EOPNOTSUPP;
+                        }
+                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+                                     info->shadow[id].req.nr_segments == 0)) {
+                                printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n",
+                                       info->gd->disk_name);
+                                error = -EOPNOTSUPP;
+                        }
+                        if (unlikely(error)) {
+                                if (error == -EOPNOTSUPP)
+                                        error = 0;
                                 info->feature_flush = 0;
                                 xlvbd_flush(info);
                         }
```
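Some backends accept barriers with payload but reject a zero-segment barrier, which is exactly what a bare flush becomes after the mapping above. The new branch treats that the same as any barrier failure: warn, clear `feature_flush` so no more barriers are issued, and squash -EOPNOTSUPP to success so the request itself does not fail back up the stack. A toy model of the decision (the errno value is Linux's; everything else is illustrative):

```c
#include <stdio.h>

#define EOPNOTSUPP 95        /* Linux errno value */

static int feature_flush = 1;

static int complete_barrier(int rsp_error, unsigned int nr_segments)
{
        int error = 0;

        if (rsp_error) {
                fprintf(stderr, "blkfront: %s write barrier op failed\n",
                        nr_segments == 0 ? "empty" : "non-empty");
                error = -EOPNOTSUPP;
        }
        if (error) {
                if (error == -EOPNOTSUPP)
                        error = 0;        /* complete the request successfully... */
                feature_flush = 0;        /* ...but stop advertising flush support */
        }
        return error;
}

int main(void)
{
        int err = complete_barrier(1, 0);        /* empty barrier rejected */

        printf("completion error %d, feature_flush now %d\n",
               err, feature_flush);
        return 0;
}
```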
```diff
@@ -899,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info)
         /* Stage 3: Find pending requests and requeue them. */
         for (i = 0; i < BLK_RING_SIZE; i++) {
                 /* Not in use? */
-                if (copy[i].request == 0)
+                if (!copy[i].request)
                         continue;
 
                 /* Grab a request slot and copy shadow state into it. */
```
```diff
@@ -916,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info)
                                 req->seg[j].gref,
                                 info->xbdev->otherend_id,
                                 pfn_to_mfn(info->shadow[req->id].frame[j]),
-                                rq_data_dir(
-                                        (struct request *)
-                                        info->shadow[req->id].request));
+                                rq_data_dir(info->shadow[req->id].request));
                 info->shadow[req->id].req = *req;
 
                 info->ring.req_prod_pvt++;
```
```diff
@@ -1067,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info)
          */
         info->feature_flush = 0;
 
-        /*
-         * The driver doesn't properly handled empty flushes, so
-         * lets disable barrier support for now.
-         */
-#if 0
-        if (!err && barrier)
-                info->feature_flush = REQ_FLUSH;
-#endif
+        if (!err && barrier)
+                info->feature_flush = REQ_FLUSH | REQ_FUA;
 
         err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
         if (err) {
```
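This re-enables the advertisement that had been `#if 0`'d out: with the empty-barrier fallback in place and FUA riding on the barrier op, the frontend can claim both flags when the backend reports barrier support. A toy of the gate (flag values are illustrative; `err` and `barrier` stand for the result of reading the backend's feature-barrier key):

```c
#include <stdio.h>

#define REQ_FLUSH (1u << 0)        /* illustrative values only */
#define REQ_FUA   (1u << 1)

int main(void)
{
        int err = 0;        /* read of "feature-barrier" succeeded (assumed) */
        int barrier = 1;    /* backend advertises barrier support */
        unsigned int feature_flush = 0;

        if (!err && barrier)
                feature_flush = REQ_FLUSH | REQ_FUA;

        printf("feature_flush = 0x%x\n", feature_flush);
        return 0;
}
```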
fs/ioprio.c

```diff
@@ -103,22 +103,15 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
         }
 
         ret = -ESRCH;
-        /*
-         * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic",
-         * so we can't use rcu_read_lock(). See re-copy of ->ioprio
-         * in copy_process().
-         */
-        read_lock(&tasklist_lock);
+        rcu_read_lock();
         switch (which) {
                 case IOPRIO_WHO_PROCESS:
-                        rcu_read_lock();
                         if (!who)
                                 p = current;
                         else
                                 p = find_task_by_vpid(who);
                         if (p)
                                 ret = set_task_ioprio(p, ioprio);
-                        rcu_read_unlock();
                         break;
                 case IOPRIO_WHO_PGRP:
                         if (!who)
@@ -141,12 +134,7 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
                                 break;
 
                         do_each_thread(g, p) {
-                                int match;
-
-                                rcu_read_lock();
-                                match = __task_cred(p)->uid == who;
-                                rcu_read_unlock();
-                                if (!match)
+                                if (__task_cred(p)->uid != who)
                                         continue;
                                 ret = set_task_ioprio(p, ioprio);
                                 if (ret)
@@ -160,7 +148,7 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
                         ret = -EINVAL;
         }
 
-        read_unlock(&tasklist_lock);
+        rcu_read_unlock();
         return ret;
 }
```
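The syscall now holds one RCU read-side section across the whole lookup instead of taking `tasklist_lock`: the task and thread walks only need RCU protection, and the per-thread `rcu_read_lock()`/`rcu_read_unlock()` pairs around the credential check collapse into plain comparisons. The same reshaping is applied to `ioprio_get` below. A minimal userspace model of the new IOPRIO_WHO_USER loop, with every kernel primitive stubbed out:

```c
#include <stdio.h>

#define ESRCH 3

/* Stubs; only the locking shape of the patched code matters here. */
static void rcu_read_lock(void)   { }
static void rcu_read_unlock(void) { }

struct task { unsigned int uid; };

static int set_task_ioprio(struct task *p, int ioprio)
{
        (void)p; (void)ioprio;
        return 0;        /* the kernel version takes its own references */
}

static int ioprio_set_user(struct task *tasks, int n,
                           unsigned int who, int ioprio)
{
        int ret = -ESRCH;
        int i;

        rcu_read_lock();        /* one section spans the whole walk */
        for (i = 0; i < n; i++) {
                if (tasks[i].uid != who)        /* was: lock, compare, unlock */
                        continue;
                ret = set_task_ioprio(&tasks[i], ioprio);
                if (ret)
                        break;
        }
        rcu_read_unlock();
        return ret;
}

int main(void)
{
        struct task t[3] = { { 1000 }, { 1001 }, { 1000 } };

        printf("ret = %d\n", ioprio_set_user(t, 3, 1000, 4));
        return 0;
}
```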
```diff
@@ -204,17 +192,15 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
         int ret = -ESRCH;
         int tmpio;
 
-        read_lock(&tasklist_lock);
+        rcu_read_lock();
         switch (which) {
                 case IOPRIO_WHO_PROCESS:
-                        rcu_read_lock();
                         if (!who)
                                 p = current;
                         else
                                 p = find_task_by_vpid(who);
                         if (p)
                                 ret = get_task_ioprio(p);
-                        rcu_read_unlock();
                         break;
                 case IOPRIO_WHO_PGRP:
                         if (!who)
@@ -241,12 +227,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
                                 break;
 
                         do_each_thread(g, p) {
-                                int match;
-
-                                rcu_read_lock();
-                                match = __task_cred(p)->uid == user->uid;
-                                rcu_read_unlock();
-                                if (!match)
+                                if (__task_cred(p)->uid != user->uid)
                                         continue;
                                 tmpio = get_task_ioprio(p);
                                 if (tmpio < 0)
@@ -264,6 +245,6 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
                         ret = -EINVAL;
         }
 
-        read_unlock(&tasklist_lock);
+        rcu_read_unlock();
         return ret;
 }
```