Commit 8ded371f authored by Linus Torvalds

Merge branch 'for-3.1/drivers' of git://git.kernel.dk/linux-block

* 'for-3.1/drivers' of git://git.kernel.dk/linux-block:
  cciss: do not attempt to read from a write-only register
  xen/blkback: Add module alias for autoloading
  xen/blkback: Don't let in-flight requests defer pending ones.
  bsg: fix address space warning from sparse
  bsg: remove unnecessary conditional expressions
  bsg: fix bsg_poll() to return POLLOUT properly
parents 096a705b 07d0c38e
...@@ -182,7 +182,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, ...@@ -182,7 +182,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
return -ENOMEM; return -ENOMEM;
} }
if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request, if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
hdr->request_len)) hdr->request_len))
return -EFAULT; return -EFAULT;
...@@ -249,7 +249,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, ...@@ -249,7 +249,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
struct request *rq, *next_rq = NULL; struct request *rq, *next_rq = NULL;
int ret, rw; int ret, rw;
unsigned int dxfer_len; unsigned int dxfer_len;
void *dxferp = NULL; void __user *dxferp = NULL;
struct bsg_class_device *bcd = &q->bsg_dev; struct bsg_class_device *bcd = &q->bsg_dev;
/* if the LLD has been removed then the bsg_unregister_queue will /* if the LLD has been removed then the bsg_unregister_queue will
...@@ -291,7 +291,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, ...@@ -291,7 +291,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
rq->next_rq = next_rq; rq->next_rq = next_rq;
next_rq->cmd_type = rq->cmd_type; next_rq->cmd_type = rq->cmd_type;
dxferp = (void*)(unsigned long)hdr->din_xferp; dxferp = (void __user *)(unsigned long)hdr->din_xferp;
ret = blk_rq_map_user(q, next_rq, NULL, dxferp, ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
hdr->din_xfer_len, GFP_KERNEL); hdr->din_xfer_len, GFP_KERNEL);
if (ret) if (ret)
...@@ -300,10 +300,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, ...@@ -300,10 +300,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
if (hdr->dout_xfer_len) { if (hdr->dout_xfer_len) {
dxfer_len = hdr->dout_xfer_len; dxfer_len = hdr->dout_xfer_len;
dxferp = (void*)(unsigned long)hdr->dout_xferp; dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
} else if (hdr->din_xfer_len) { } else if (hdr->din_xfer_len) {
dxfer_len = hdr->din_xfer_len; dxfer_len = hdr->din_xfer_len;
dxferp = (void*)(unsigned long)hdr->din_xferp; dxferp = (void __user *)(unsigned long)hdr->din_xferp;
} else } else
dxfer_len = 0; dxfer_len = 0;
...@@ -445,7 +445,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, ...@@ -445,7 +445,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
int len = min_t(unsigned int, hdr->max_response_len, int len = min_t(unsigned int, hdr->max_response_len,
rq->sense_len); rq->sense_len);
ret = copy_to_user((void*)(unsigned long)hdr->response, ret = copy_to_user((void __user *)(unsigned long)hdr->response,
rq->sense, len); rq->sense, len);
if (!ret) if (!ret)
hdr->response_len = len; hdr->response_len = len;
...@@ -606,7 +606,7 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) ...@@ -606,7 +606,7 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
ret = __bsg_read(buf, count, bd, NULL, &bytes_read); ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
*ppos = bytes_read; *ppos = bytes_read;
if (!bytes_read || (bytes_read && err_block_err(ret))) if (!bytes_read || err_block_err(ret))
bytes_read = ret; bytes_read = ret;
return bytes_read; return bytes_read;
...@@ -686,7 +686,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) ...@@ -686,7 +686,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
/* /*
* return bytes written on non-fatal errors * return bytes written on non-fatal errors
*/ */
if (!bytes_written || (bytes_written && err_block_err(ret))) if (!bytes_written || err_block_err(ret))
bytes_written = ret; bytes_written = ret;
dprintk("%s: returning %Zd\n", bd->name, bytes_written); dprintk("%s: returning %Zd\n", bd->name, bytes_written);
...@@ -878,7 +878,7 @@ static unsigned int bsg_poll(struct file *file, poll_table *wait) ...@@ -878,7 +878,7 @@ static unsigned int bsg_poll(struct file *file, poll_table *wait)
spin_lock_irq(&bd->lock); spin_lock_irq(&bd->lock);
if (!list_empty(&bd->done_list)) if (!list_empty(&bd->done_list))
mask |= POLLIN | POLLRDNORM; mask |= POLLIN | POLLRDNORM;
if (bd->queued_cmds >= bd->max_queue) if (bd->queued_cmds < bd->max_queue)
mask |= POLLOUT; mask |= POLLOUT;
spin_unlock_irq(&bd->lock); spin_unlock_irq(&bd->lock);
......
...@@ -223,7 +223,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) ...@@ -223,7 +223,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
h->ctlr, c->busaddr); h->ctlr, c->busaddr);
#endif /* CCISS_DEBUG */ #endif /* CCISS_DEBUG */
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
h->commands_outstanding++; h->commands_outstanding++;
if ( h->commands_outstanding > h->max_outstanding) if ( h->commands_outstanding > h->max_outstanding)
h->max_outstanding = h->commands_outstanding; h->max_outstanding = h->commands_outstanding;
......
...@@ -458,7 +458,8 @@ static void end_block_io_op(struct bio *bio, int error) ...@@ -458,7 +458,8 @@ static void end_block_io_op(struct bio *bio, int error)
* (which has the sectors we want, number of them, grant references, etc), * (which has the sectors we want, number of them, grant references, etc),
* and transmute it to the block API to hand it over to the proper block disk. * and transmute it to the block API to hand it over to the proper block disk.
*/ */
static int do_block_io_op(struct xen_blkif *blkif) static int
__do_block_io_op(struct xen_blkif *blkif)
{ {
union blkif_back_rings *blk_rings = &blkif->blk_rings; union blkif_back_rings *blk_rings = &blkif->blk_rings;
struct blkif_request req; struct blkif_request req;
...@@ -515,6 +516,23 @@ static int do_block_io_op(struct xen_blkif *blkif) ...@@ -515,6 +516,23 @@ static int do_block_io_op(struct xen_blkif *blkif)
return more_to_do; return more_to_do;
} }
static int
do_block_io_op(struct xen_blkif *blkif)
{
union blkif_back_rings *blk_rings = &blkif->blk_rings;
int more_to_do;
do {
more_to_do = __do_block_io_op(blkif);
if (more_to_do)
break;
RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
} while (more_to_do);
return more_to_do;
}
/* /*
* Transmutation of the 'struct blkif_request' to a proper 'struct bio' * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
* and call the 'submit_bio' to pass it to the underlying storage. * and call the 'submit_bio' to pass it to the underlying storage.
...@@ -700,7 +718,6 @@ static void make_response(struct xen_blkif *blkif, u64 id, ...@@ -700,7 +718,6 @@ static void make_response(struct xen_blkif *blkif, u64 id,
struct blkif_response resp; struct blkif_response resp;
unsigned long flags; unsigned long flags;
union blkif_back_rings *blk_rings = &blkif->blk_rings; union blkif_back_rings *blk_rings = &blkif->blk_rings;
int more_to_do = 0;
int notify; int notify;
resp.id = id; resp.id = id;
...@@ -727,22 +744,7 @@ static void make_response(struct xen_blkif *blkif, u64 id, ...@@ -727,22 +744,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
} }
blk_rings->common.rsp_prod_pvt++; blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
/*
* Tail check for pending requests. Allows frontend to avoid
* notifications if requests are already in flight (lower
* overheads and promotes batching).
*/
RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
more_to_do = 1;
}
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
if (more_to_do)
blkif_notify_work(blkif);
if (notify) if (notify)
notify_remote_via_irq(blkif->irq); notify_remote_via_irq(blkif->irq);
} }
...@@ -824,3 +826,4 @@ static int __init xen_blkif_init(void) ...@@ -824,3 +826,4 @@ static int __init xen_blkif_init(void)
module_init(xen_blkif_init); module_init(xen_blkif_init);
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment