Commit f4eef1b6 authored by Paul Durrant's avatar Paul Durrant Committed by Juergen Gross

xen-blkback: support dynamic unbind/bind

By simply re-attaching to shared rings during connect_ring() rather than
assuming they are freshly allocated (i.e. assuming the counters are zero)
it is possible for vbd instances to be unbound and re-bound from and to
(respectively) a running guest.

This has been tested by running:

while true;
  do fio --name=randwrite --ioengine=libaio --iodepth=16 \
  --rw=randwrite --bs=4k --direct=1 --size=1G --verify=crc32;
  done

in a PV guest whilst running:

while true;
  do echo vbd-$DOMID-$VBD >unbind;
  echo unbound;
  sleep 5;
  echo vbd-$DOMID-$VBD >bind;
  echo bound;
  sleep 3;
  done

in dom0 from /sys/bus/xen-backend/drivers/vbd to continuously unbind and
re-bind its system disk image.

This is a highly useful feature for a backend module as it allows the module
to be unloaded and re-loaded (i.e. updated) without requiring domUs to be halted.
This was also tested by running:

while true;
  do echo vbd-$DOMID-$VBD >unbind;
  echo unbound;
  sleep 5;
  rmmod xen-blkback;
  echo unloaded;
  sleep 1;
  modprobe xen-blkback;
  echo bound;
  cd $(pwd);
  sleep 3;
  done

in dom0 whilst running the same loop as above in the (single) PV guest.

Some (less stressful) testing has also been done using a Windows HVM guest
with the latest 9.0 PV drivers installed.
Signed-off-by: default avatarPaul Durrant <pdurrant@amazon.com>
Reviewed-by: default avatarJuergen Gross <jgross@suse.com>
Reviewed-by: default avatarRoger Pau Monné <roger.pau@citrix.com>
Signed-off-by: default avatarJuergen Gross <jgross@suse.com>
parent 1ee54195
...@@ -190,6 +190,9 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, ...@@ -190,6 +190,9 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
{ {
int err; int err;
struct xen_blkif *blkif = ring->blkif; struct xen_blkif *blkif = ring->blkif;
const struct blkif_common_sring *sring_common;
RING_IDX rsp_prod, req_prod;
unsigned int size;
/* Already connected through? */ /* Already connected through? */
if (ring->irq) if (ring->irq)
...@@ -200,46 +203,62 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref, ...@@ -200,46 +203,62 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
if (err < 0) if (err < 0)
return err; return err;
sring_common = (struct blkif_common_sring *)ring->blk_ring;
rsp_prod = READ_ONCE(sring_common->rsp_prod);
req_prod = READ_ONCE(sring_common->req_prod);
switch (blkif->blk_protocol) { switch (blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE: case BLKIF_PROTOCOL_NATIVE:
{ {
struct blkif_sring *sring; struct blkif_sring *sring_native =
sring = (struct blkif_sring *)ring->blk_ring; (struct blkif_sring *)ring->blk_ring;
BACK_RING_INIT(&ring->blk_rings.native, sring,
XEN_PAGE_SIZE * nr_grefs); BACK_RING_ATTACH(&ring->blk_rings.native, sring_native,
rsp_prod, XEN_PAGE_SIZE * nr_grefs);
size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
break; break;
} }
case BLKIF_PROTOCOL_X86_32: case BLKIF_PROTOCOL_X86_32:
{ {
struct blkif_x86_32_sring *sring_x86_32; struct blkif_x86_32_sring *sring_x86_32 =
sring_x86_32 = (struct blkif_x86_32_sring *)ring->blk_ring; (struct blkif_x86_32_sring *)ring->blk_ring;
BACK_RING_INIT(&ring->blk_rings.x86_32, sring_x86_32,
XEN_PAGE_SIZE * nr_grefs); BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32,
rsp_prod, XEN_PAGE_SIZE * nr_grefs);
size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
break; break;
} }
case BLKIF_PROTOCOL_X86_64: case BLKIF_PROTOCOL_X86_64:
{ {
struct blkif_x86_64_sring *sring_x86_64; struct blkif_x86_64_sring *sring_x86_64 =
sring_x86_64 = (struct blkif_x86_64_sring *)ring->blk_ring; (struct blkif_x86_64_sring *)ring->blk_ring;
BACK_RING_INIT(&ring->blk_rings.x86_64, sring_x86_64,
XEN_PAGE_SIZE * nr_grefs); BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64,
rsp_prod, XEN_PAGE_SIZE * nr_grefs);
size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
break; break;
} }
default: default:
BUG(); BUG();
} }
err = -EIO;
if (req_prod - rsp_prod > size)
goto fail;
err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
xen_blkif_be_int, 0, xen_blkif_be_int, 0,
"blkif-backend", ring); "blkif-backend", ring);
if (err < 0) { if (err < 0)
xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring); goto fail;
ring->blk_rings.common.sring = NULL;
return err;
}
ring->irq = err; ring->irq = err;
return 0; return 0;
fail:
xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
ring->blk_rings.common.sring = NULL;
return err;
} }
static int xen_blkif_disconnect(struct xen_blkif *blkif) static int xen_blkif_disconnect(struct xen_blkif *blkif)
...@@ -1131,7 +1150,8 @@ static struct xenbus_driver xen_blkbk_driver = { ...@@ -1131,7 +1150,8 @@ static struct xenbus_driver xen_blkbk_driver = {
.ids = xen_blkbk_ids, .ids = xen_blkbk_ids,
.probe = xen_blkbk_probe, .probe = xen_blkbk_probe,
.remove = xen_blkbk_remove, .remove = xen_blkbk_remove,
.otherend_changed = frontend_changed .otherend_changed = frontend_changed,
.allow_rebind = true,
}; };
int xen_blkif_xenbus_init(void) int xen_blkif_xenbus_init(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment