Commit 01f37f2d authored by Konrad Rzeszutek Wilk

xen/blkback: Fixed up comments and converted spaces to tabs.

Suggested-by: Ian Campbell <Ian.Campbell@eu.citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 3d68b399
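
Most of the diff below is mechanical: multi-line comments are reflowed into the kernel's preferred block style, with the opening `/*` and closing `*/` on their own lines, and leading spaces become tabs. A minimal sketch of the transformation being applied (the comment text here is illustrative, not taken from the patch):

```c
/* Old style: text starts on the opening line,
 * and the comment may close on a text line. */

/*
 * New style: the comment markers stand alone and
 * continuation lines are tab-indented.
 */
```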
@@ -103,7 +103,8 @@ static struct xen_blkbk *blkbk;
  * Little helpful macro to figure out the index and virtual address of the
  * pending_pages[..]. For each 'pending_req' we have have up to
  * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
- * 10 and would index in the pending_pages[..]. */
+ * 10 and would index in the pending_pages[..].
+ */
 static inline int vaddr_pagenr(struct pending_req *req, int seg)
 {
 	return (req - blkbk->pending_reqs) *
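
The hunk above cuts off mid-expression. As a rough userspace model of the indexing scheme the comment describes (the pool size, the names, and the completing arithmetic are assumptions, not taken from the patch), each pending request owns a contiguous run of BLKIF_MAX_SEGMENTS_PER_REQUEST slots in one flat page array:

```c
#include <stdio.h>

#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
#define NR_PENDING_REQS 4	/* hypothetical pool size for the demo */

struct pending_req { int dummy; };

static struct pending_req pending_reqs[NR_PENDING_REQS];

/* Flatten a (request, segment) pair into an index into a single
 * pending_pages[] array: requests are rows, segments are columns. */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
	return (int)(req - pending_reqs) *
		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

int main(void)
{
	/* Request #2, segment #3 lands at 2 * 11 + 3 = 25. */
	printf("%d\n", vaddr_pagenr(&pending_reqs[2], 3));
	return 0;
}
```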
@@ -167,8 +168,6 @@ static void free_req(struct pending_req *req)
 /*
  * Routines for managing virtual block devices (vbds).
  */
-
-
 static int vbd_translate(struct phys_req *req, struct blkif_st *blkif,
 			 int operation)
 {
@@ -315,7 +314,7 @@ struct seg_buf {
 /*
  * Unmap the grant references, and also remove the M2P over-rides
  * used in the 'pending_req'.
-*/
+ */
 static void xen_blkbk_unmap(struct pending_req *req)
 {
 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -336,27 +335,32 @@ static void xen_blkbk_unmap(struct pending_req *req)
 	ret = HYPERVISOR_grant_table_op(
 		GNTTABOP_unmap_grant_ref, unmap, invcount);
 	BUG_ON(ret);
-	/* Note, we use invcount, so nr->pages, so we can't index
+	/*
+	 * Note, we use invcount, so nr->pages, so we can't index
 	 * using vaddr(req, i).
 	 */
 	for (i = 0; i < invcount; i++) {
 		ret = m2p_remove_override(
 			virt_to_page(unmap[i].host_addr), false);
 		if (ret) {
-			printk(KERN_ALERT "Failed to remove M2P override for " \
-				"%lx\n", (unsigned long)unmap[i].host_addr);
+			printk(KERN_ALERT "Failed to remove M2P override for %lx\n",
+			       (unsigned long)unmap[i].host_addr);
 			continue;
 		}
 	}
 }
-static int xen_blkbk_map(struct blkif_request *req, struct pending_req *pending_req,
+
+static int xen_blkbk_map(struct blkif_request *req,
+			 struct pending_req *pending_req,
 			 struct seg_buf seg[])
 {
 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	int i;
 	int nseg = req->nr_segments;
 	int ret = 0;
-	/* Fill out preq.nr_sects with proper amount of sectors, and setup
+
+	/*
+	 * Fill out preq.nr_sects with proper amount of sectors, and setup
 	 * assign map[..] with the PFN of the page in our domain with the
 	 * corresponding grant reference for each page.
 	 */
@@ -367,13 +371,15 @@ static int xen_blkbk_map(struct blkif_request *req, struct pending_req *pending_
 		if (pending_req->operation != BLKIF_OP_READ)
 			flags |= GNTMAP_readonly;
 		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
-				  req->u.rw.seg[i].gref, pending_req->blkif->domid);
+				  req->u.rw.seg[i].gref,
+				  pending_req->blkif->domid);
 	}

 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
 	BUG_ON(ret);

-	/* Now swizzel the MFN in our domain with the MFN from the other domain
+	/*
+	 * Now swizzle the MFN in our domain with the MFN from the other domain
 	 * so that when we access vaddr(pending_req,i) it has the contents of
 	 * the page from the other domain.
 	 */
@@ -423,7 +429,8 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
 		pending_req->status = BLKIF_RSP_ERROR;
 	}

-	/* If all of the bio's have completed it is time to unmap
+	/*
+	 * If all of the bio's have completed it is time to unmap
 	 * the grant references associated with 'request' and provide
 	 * the proper response on the ring.
 	 */
@@ -510,8 +517,8 @@ static int do_block_io_op(struct blkif_st *blkif)
 }

 /*
- * Transumation of the 'struct blkif_request' to a proper 'struct bio'
- * and call the 'submit_bio' to pass it to the underlaying storage.
+ * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
+ * and call the 'submit_bio' to pass it to the underlying storage.
  */
 static int dispatch_rw_block_io(struct blkif_st *blkif,
 				struct blkif_request *req,
@@ -538,8 +545,10 @@ static int dispatch_rw_block_io(struct blkif_st *blkif,
 	case BLKIF_OP_FLUSH_DISKCACHE:
 		blkif->st_f_req++;
 		operation = WRITE_FLUSH;
-		/* The frontend likes to set this to -1, which vbd_translate
-		 * is alergic too. */
+		/*
+		 * The frontend likes to set this to -1, which vbd_translate
+		 * is alergic too.
+		 */
 		req->u.rw.sector_number = 0;
 		break;
 	case BLKIF_OP_WRITE_BARRIER:
@@ -585,8 +594,11 @@ static int dispatch_rw_block_io(struct blkif_st *blkif,
 			 preq.sector_number + preq.nr_sects, preq.dev);
 		goto fail_response;
 	}
-	/* This check _MUST_ be done after vbd_translate as the preq.bdev
-	 * is set there. */
+
+	/*
+	 * This check _MUST_ be done after vbd_translate as the preq.bdev
+	 * is set there.
+	 */
 	for (i = 0; i < nseg; i++) {
 		if (((int)preq.sector_number|(int)seg[i].nsec) &
 		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
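
The alignment test in this hunk works because both values are sector counts: OR-ing the starting sector with each segment's length and masking against `(logical block size in sectors) - 1` flags a misalignment in either one. A small userspace model of the same arithmetic (function and variable names are illustrative, not the driver's):

```c
#include <stdbool.h>
#include <stdio.h>

/* With a 4096-byte logical block (8 sectors of 512 bytes), both the
 * starting sector and the segment's sector count must be multiples
 * of 8; OR-ing them and masking with (8 - 1) catches either case. */
static bool misaligned(unsigned int sector_number, unsigned int nsec,
		       unsigned int logical_block_size)
{
	unsigned int mask = (logical_block_size >> 9) - 1;

	return ((sector_number | nsec) & mask) != 0;
}

int main(void)
{
	printf("%d\n", misaligned(16, 8, 4096));	/* 0: aligned */
	printf("%d\n", misaligned(16, 5, 4096));	/* 1: nsec is not */
	return 0;
}
```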
@@ -595,7 +607,9 @@ static int dispatch_rw_block_io(struct blkif_st *blkif,
 			goto fail_response;
 		}
 	}
-	/* If we have failed at this point, we need to undo the M2P override,
+
+	/*
+	 * If we have failed at this point, we need to undo the M2P override,
 	 * set gnttab_set_unmap_op on all of the grant references and perform
 	 * the hypercall to unmap the grants - that is all done in
 	 * xen_blkbk_unmap.
@@ -638,8 +652,8 @@ static int dispatch_rw_block_io(struct blkif_st *blkif,
 		bio->bi_end_io = end_block_io_op;
 	}

-
-	/* We set it one so that the last submit_bio does not have to call
+	/*
+	 * We set it one so that the last submit_bio does not have to call
 	 * atomic_inc.
 	 */
 	atomic_set(&pending_req->pendcnt, nbio);
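
The pendcnt comment above describes a standard completion-counting idiom: the counter is set once to the number of submitted bios, each completion decrements it, and whichever completion drops it to zero finishes the request. A minimal C11 sketch of that pattern (names are illustrative, not the driver's):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pendcnt;

/* Called once per completed sub-I/O. atomic_fetch_sub() returns the
 * previous value, so seeing 1 means this call dropped the count to
 * zero and owns the final cleanup (unmap grants, post the response). */
static void end_one_io(void)
{
	if (atomic_fetch_sub(&pendcnt, 1) == 1)
		printf("last completion: finish the request\n");
}

int main(void)
{
	int nbio = 3;

	/* Set the count once up front; completions never increment it. */
	atomic_store(&pendcnt, nbio);
	for (int i = 0; i < nbio; i++)
		end_one_io();
	return 0;
}
```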
...
@@ -47,12 +47,17 @@
 	__FILE__ , __LINE__ , ## _a)

 struct vbd {
-	blkif_vdev_t handle;	/* what the domain refers to this vbd as */
-	unsigned char readonly;	/* Non-zero -> read-only */
-	unsigned char type;	/* VDISK_xxx */
-	u32 pdevice;		/* phys device that this vbd maps to */
+	/* What the domain refers to this vbd as. */
+	blkif_vdev_t handle;
+	/* Non-zero -> read-only */
+	unsigned char readonly;
+	/* VDISK_xxx */
+	unsigned char type;
+	/* phys device that this vbd maps to. */
+	u32 pdevice;
 	struct block_device *bdev;
-	sector_t size;		/* Cached size parameter */
+	/* Cached size parameter. */
+	sector_t size;
 	bool flush_support;
 };
...
@@ -425,7 +425,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
 	return err;
 }

-/**
+/*
  * Entry point to this code when a new device is created. Allocate the basic
  * structures, and watch the store waiting for the hotplug scripts to tell us
  * the device's physical major and minor numbers. Switch to InitWait.
@@ -473,7 +473,7 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
 }

-/**
+/*
  * Callback received when the hotplug scripts have placed the physical-device
  * node. Read it and the mode node, and create a vbd. If the frontend is
  * ready, connect.
@@ -495,9 +495,11 @@ static void backend_changed(struct xenbus_watch *watch,
 	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
 			   &major, &minor);
 	if (XENBUS_EXIST_ERR(err)) {
-		/* Since this watch will fire once immediately after it is
-		   registered, we expect this. Ignore it, and wait for the
-		   hotplug scripts. */
+		/*
+		 * Since this watch will fire once immediately after it is
+		 * registered, we expect this. Ignore it, and wait for the
+		 * hotplug scripts.
+		 */
 		return;
 	}
 	if (err != 2) {
@@ -562,7 +564,7 @@ static void backend_changed(struct xenbus_watch *watch,
 }

-/**
+/*
  * Callback received when the frontend's state changes.
  */
 static void frontend_changed(struct xenbus_device *dev,
@@ -584,13 +586,16 @@ static void frontend_changed(struct xenbus_device *dev,
 	case XenbusStateInitialised:
 	case XenbusStateConnected:
-		/* Ensure we connect even when two watches fire in
-		   close successsion and we miss the intermediate value
-		   of frontend_state. */
+		/*
+		 * Ensure we connect even when two watches fire in
+		 * close successsion and we miss the intermediate value
+		 * of frontend_state.
+		 */
 		if (dev->state == XenbusStateConnected)
 			break;

-		/* Enforce precondition before potential leak point.
+		/*
+		 * Enforce precondition before potential leak point.
 		 * blkif_disconnect() is idempotent.
 		 */
 		xen_blkif_disconnect(be->blkif);
@@ -627,7 +632,7 @@ static void frontend_changed(struct xenbus_device *dev,
 /* ** Connection ** */

-/**
+/*
  * Write the physical details regarding the block device to the store, and
  * switch to Connected state.
  */
...