Commit 24f567f9 authored by Konrad Rzeszutek Wilk

xen/blkback: Add support for BLKIF_OP_FLUSH_DISKCACHE and drop BLKIF_OP_WRITE_BARRIER.

We drop support for 'feature-barrier' and instead advertise
'feature-flush-cache' when the real backend storage supports
cache flushes.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 73d842af
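For context, the negotiation here is one-sided: this patch only makes the backend write the xenstore key; consuming it is the frontend's job and is not part of this diff. A minimal sketch of how a frontend might probe the new key, assuming a hypothetical state structure (my_frontend_info and probe_flush_cache are illustrative names; xenbus_scanf() is the real xenbus accessor):

    #include <xen/xenbus.h>

    struct my_frontend_info {          /* hypothetical frontend state */
        unsigned int feature_flush;    /* nonzero if backend can flush */
    };

    static void probe_flush_cache(struct xenbus_device *dev,
                                  struct my_frontend_info *info)
    {
        int flush = 0;

        /* Backends that predate this patch never write the key,
         * so treat a read failure the same as "0". */
        if (xenbus_scanf(XBT_NIL, dev->otherend,
                         "feature-flush-cache", "%d", &flush) != 1)
            flush = 0;

        info->feature_flush = flush;
        /* When set, the frontend may issue BLKIF_OP_FLUSH_DISKCACHE
         * (with nr_segments == 0) to drain the backend's write cache. */
    }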
@@ -46,8 +46,6 @@
 #include <asm/xen/hypercall.h>
 #include "common.h"
 
-#define WRITE_BARRIER	(REQ_WRITE | REQ_FLUSH | REQ_FUA)
-
 /*
  * These are rather arbitrary. They are fairly large because adjacent requests
  * pulled from a communication ring are quite likely to end up being part of
@@ -256,9 +254,9 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
 static void print_stats(struct blkif_st *blkif)
 {
-	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
+	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | f %4d\n",
 	       current->comm, blkif->st_oo_req,
-	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
+	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req);
 	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
 	blkif->st_rd_req = 0;
 	blkif->st_wr_req = 0;
@@ -414,10 +412,10 @@ static int xen_blkbk_map(struct blkif_request *req, struct pending_req *pending_
 static void __end_block_io_op(struct pending_req *pending_req, int error)
 {
 	/* An error fails the entire request. */
-	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
+	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
 	    (error == -EOPNOTSUPP)) {
-		DPRINTK("blkback: write barrier op failed, not supported\n");
-		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
+		DPRINTK("blkback: flush diskcache op failed, not supported\n");
+		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
 	} else if (error) {
 		DPRINTK("Buffer not up-to-date at end of operation, "
@@ -506,13 +504,14 @@ static int do_block_io_op(struct blkif_st *blkif)
 		blkif->st_rd_req++;
 		dispatch_rw_block_io(blkif, &req, pending_req);
 		break;
-	case BLKIF_OP_WRITE_BARRIER:
-		blkif->st_br_req++;
+	case BLKIF_OP_FLUSH_DISKCACHE:
+		blkif->st_f_req++;
 		/* fall through */
 	case BLKIF_OP_WRITE:
 		blkif->st_wr_req++;
 		dispatch_rw_block_io(blkif, &req, pending_req);
 		break;
+	case BLKIF_OP_WRITE_BARRIER:
 	default:
 		/* A good sign something is wrong: sleep for a while to
 		 * avoid excessive CPU consumption by a bad guest. */
...@@ -556,9 +555,14 @@ static void dispatch_rw_block_io(struct blkif_st *blkif, ...@@ -556,9 +555,14 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
case BLKIF_OP_WRITE: case BLKIF_OP_WRITE:
operation = WRITE_ODIRECT; operation = WRITE_ODIRECT;
break; break;
case BLKIF_OP_WRITE_BARRIER: case BLKIF_OP_FLUSH_DISKCACHE:
operation = WRITE_BARRIER; operation = WRITE_FLUSH;
/* The frontend likes to set this to -1, which vbd_translate
* is alergic too. */
req->u.rw.sector_number = 0;
break; break;
case BLKIF_OP_WRITE_BARRIER:
/* Should never get here. */
default: default:
operation = 0; /* make gcc happy */ operation = 0; /* make gcc happy */
BUG(); BUG();
@@ -566,7 +570,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	/* Check that the number of segments is sane. */
 	nseg = req->nr_segments;
-	if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
+	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
 	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
 		DPRINTK("Bad number of segments in request (%d)\n", nseg);
 		/* Haven't submitted any bio's yet. */
@@ -643,7 +647,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	/* This will be hit if the operation was a barrier. */
 	if (!bio) {
-		BUG_ON(operation != WRITE_BARRIER);
+		BUG_ON(operation != WRITE_FLUSH);
 		bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
 		if (unlikely(bio == NULL))
 			goto fail_put_bio;
@@ -651,7 +655,6 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 		bio->bi_bdev    = preq.bdev;
 		bio->bi_private = pending_req;
 		bio->bi_end_io  = end_block_io_op;
-		bio->bi_sector  = -1;
 	}
@@ -671,7 +674,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 	if (operation == READ)
 		blkif->st_rd_sect += preq.nr_sects;
-	else if (operation == WRITE || operation == WRITE_BARRIER)
+	else if (operation == WRITE || operation == WRITE_FLUSH)
 		blkif->st_wr_sect += preq.nr_sects;
 
 	return;
...
@@ -53,6 +53,7 @@ struct vbd {
 	u32		pdevice;	/* phys device that this vbd maps to */
 	struct block_device *bdev;
 	sector_t	size;		/* Cached size parameter */
+	bool		flush_support;
 };
 
 struct backend_info;
@@ -85,7 +86,7 @@ struct blkif_st {
 	int		st_rd_req;
 	int		st_wr_req;
 	int		st_oo_req;
-	int		st_br_req;
+	int		st_f_req;
 	int		st_rd_sect;
 	int		st_wr_sect;
@@ -120,8 +121,8 @@ int xen_blkif_xenbus_init(void);
 irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
 int xen_blkif_schedule(void *arg);
 
-int xen_blkbk_barrier(struct xenbus_transaction xbt,
-		      struct backend_info *be, int state);
+int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
+			      struct backend_info *be, int state);
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
...
@@ -276,7 +276,7 @@ int __init xen_blkif_interface_init(void)
 VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
 VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
 VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
-VBD_SHOW(br_req,  "%d\n", be->blkif->st_br_req);
+VBD_SHOW(f_req,   "%d\n", be->blkif->st_f_req);
 VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
 VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
@@ -284,7 +284,7 @@ static struct attribute *vbdstat_attrs[] = {
 	&dev_attr_oo_req.attr,
 	&dev_attr_rd_req.attr,
 	&dev_attr_wr_req.attr,
-	&dev_attr_br_req.attr,
+	&dev_attr_f_req.attr,
 	&dev_attr_rd_sect.attr,
 	&dev_attr_wr_sect.attr,
 	NULL
@@ -343,6 +343,7 @@ static int vbd_create(struct blkif_st *blkif, blkif_vdev_t handle,
 {
 	struct vbd *vbd;
 	struct block_device *bdev;
+	struct request_queue *q;
 
 	vbd = &blkif->vbd;
 	vbd->handle = handle;
@@ -375,6 +376,10 @@ static int vbd_create(struct blkif_st *blkif, blkif_vdev_t handle,
 	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
 		vbd->type |= VDISK_REMOVABLE;
 
+	q = bdev_get_queue(bdev);
+	if (q && q->flush_flags)
+		vbd->flush_support = true;
+
 	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
 		handle, blkif->domid);
 	return 0;
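For reference, q->flush_flags is nonzero only when the underlying driver has declared a flushable write cache. A hedged sketch of the other side of that contract, i.e. what a block driver of this era does during probe so that the test above fires (my_driver_setup_flush is a hypothetical name; blk_queue_flush() is the real queue API of this period):

    #include <linux/blkdev.h>

    /* Sketch: advertise a volatile write cache (REQ_FLUSH) and
     * forced-unit-access writes (REQ_FUA). This populates
     * q->flush_flags, which vbd_create() above checks to decide
     * whether to advertise feature-flush-cache. */
    static void my_driver_setup_flush(struct request_queue *q)
    {
        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
    }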
@@ -406,16 +411,16 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
 	return 0;
 }
 
-int xen_blkbk_barrier(struct xenbus_transaction xbt,
-		      struct backend_info *be, int state)
+int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
+			      struct backend_info *be, int state)
 {
 	struct xenbus_device *dev = be->dev;
 	int err;
 
-	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
-			    "%d", state);
+	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
+			    "%d", state);
 	if (err)
-		xenbus_dev_fatal(dev, err, "writing feature-barrier");
+		xenbus_dev_fatal(dev, err, "writing feature-flush-cache");
 
 	return err;
 }
@@ -642,7 +647,7 @@ static void connect(struct backend_info *be)
 		return;
 	}
 
-	err = xen_blkbk_barrier(xbt, be, 1);
+	err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
 	if (err)
 		goto abort;
...
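The connect() hunk above sits inside the usual xenbus transaction retry loop: the old code wrote feature-barrier = 1 unconditionally, while the new code passes the cached vbd.flush_support flag. A condensed sketch of that surrounding pattern, assuming the standard xenbus idiom and the structures from common.h (my_connect_sketch is a hypothetical name; other keys are elided):

    static void my_connect_sketch(struct backend_info *be)
    {
        struct xenbus_transaction xbt;
        int err;
    again:
        err = xenbus_transaction_start(&xbt);
        if (err)
            return;
        /* Advertise flush support only when the queue has it. */
        err = xen_blkbk_flush_diskcache(xbt, be,
                                        be->blkif->vbd.flush_support);
        if (err)
            goto abort;
        /* ... sectors, info, sector-size keys written here ... */
        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
            goto again;
        return;
    abort:
        xenbus_transaction_end(xbt, 1);
    }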