From 17636f47a474af4e1da8275cd8bcf63c70643aef Mon Sep 17 00:00:00 2001
From: Keir Fraser
Date: Fri, 4 Jul 2008 12:00:38 +0100
Subject: Teach xc_save to use event-channel-based domain suspend if available.

If the guest provides a suspend event channel through xenstore, xc_save
will use it in preference to the old xenstore-based method. Xend is
still informed when the domain has suspended so that it can perform
device migration in parallel with last-round migration.

Signed-off-by: Brendan Cully
---
 tools/xcutils/xc_save.c | 138 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 137 insertions(+), 1 deletion(-)

diff --git a/tools/xcutils/xc_save.c b/tools/xcutils/xc_save.c
index 3feecc524b..c72500a700 100644
--- a/tools/xcutils/xc_save.c
+++ b/tools/xcutils/xc_save.c
@@ -23,11 +23,18 @@
 #include
 #include
 
+static struct suspendinfo {
+    int xce; /* event channel handle */
+
+    int suspend_evtchn;
+    int suspended_evtchn;
+} si;
+
 /**
  * Issue a suspend request through stdout, and receive the acknowledgement
  * from stdin. This is handled by XendCheckpoint in the Python layer.
  */
-static int suspend(int domid)
+static int compat_suspend(int domid)
 {
     char ans[30];
 
@@ -38,6 +45,131 @@ static int suspend(int domid)
             !strncmp(ans, "done\n", 5));
 }
 
+static int suspend_evtchn_release(int xc, int domid)
+{
+    if (si.suspended_evtchn >= 0) {
+        xc_dom_subscribe(xc, domid, 0);
+        xc_evtchn_unbind(si.xce, si.suspended_evtchn);
+        si.suspended_evtchn = -1;
+    }
+    if (si.suspend_evtchn >= 0) {
+        xc_evtchn_unbind(si.xce, si.suspend_evtchn);
+        si.suspend_evtchn = -1;
+    }
+    if (si.xce >= 0) {
+        xc_evtchn_close(si.xce);
+        si.xce = -1;
+    }
+
+    return 0;
+}
+
+static int suspend_evtchn_init(int xc, int domid)
+{
+    struct xs_handle *xs;
+    char path[128];
+    char *portstr;
+    unsigned int plen;
+    int port;
+    int rc;
+
+    si.xce = -1;
+    si.suspend_evtchn = -1;
+    si.suspended_evtchn = -1;
+
+    xs = xs_daemon_open();
+    if (!xs) {
+        errx(1, "failed to get xenstore handle");
+        return -1;
+    }
+    sprintf(path, "/local/domain/%d/device/suspend/event-channel", domid);
+    portstr = xs_read(xs, XBT_NULL, path, &plen);
+    xs_daemon_close(xs);
+
+    if (!portstr || !plen) {
+        warnx("could not read suspend event channel");
+        return -1;
+    }
+
+    port = atoi(portstr);
+    free(portstr);
+
+    si.xce = xc_evtchn_open();
+    if (si.xce < 0) {
+        errx(1, "failed to open event channel handle");
+        goto cleanup;
+    }
+
+    si.suspend_evtchn = xc_evtchn_bind_interdomain(si.xce, domid, port);
+    if (si.suspend_evtchn < 0) {
+        errx(1, "failed to bind suspend event channel: %d",
+             si.suspend_evtchn);
+        goto cleanup;
+    }
+
+    si.suspended_evtchn = xc_evtchn_bind_unbound_port(si.xce, domid);
+    if (si.suspended_evtchn < 0) {
+        errx(1, "failed to allocate suspend notification port: %d",
+             si.suspended_evtchn);
+        goto cleanup;
+    }
+
+    rc = xc_dom_subscribe(xc, domid, si.suspended_evtchn);
+    if (rc < 0) {
+        errx(1, "failed to subscribe to domain: %d", rc);
+        goto cleanup;
+    }
+
+    return 0;
+
+  cleanup:
+    suspend_evtchn_release(xc, domid);
+
+    return -1;
+}
+
+/**
+ * Issue a suspend request to a dedicated event channel in the guest, and
+ * receive the acknowledgement from the subscribe event channel.
+ */
+static int evtchn_suspend(int domid)
+{
+    int xcefd;
+    int rc;
+
+    rc = xc_evtchn_notify(si.xce, si.suspend_evtchn);
+    if (rc < 0) {
+        errx(1, "failed to notify suspend request channel: %d", rc);
+        return 0;
+    }
+
+    xcefd = xc_evtchn_fd(si.xce);
+    do {
+        rc = xc_evtchn_pending(si.xce);
+        if (rc < 0) {
+            errx(1, "error polling suspend notification channel: %d", rc);
+            return 0;
+        }
+    } while (rc != si.suspended_evtchn);
+
+    /* harmless for one-off suspend */
+    if (xc_evtchn_unmask(si.xce, si.suspended_evtchn) < 0)
+        errx(1, "failed to unmask suspend notification channel: %d", rc);
+
+    /* notify xend that it can do device migration */
+    printf("suspended\n");
+    fflush(stdout);
+
+    return 1;
+}
+
+static int suspend(int domid)
+{
+    if (si.suspend_evtchn >= 0)
+        return evtchn_suspend(domid);
+
+    return compat_suspend(domid);
+}
+
 /* For HVM guests, there are two sources of dirty pages: the Xen shadow
  * log-dirty bitmap, which we get with a hypercall, and qemu's version.
  * The protocol for getting page-dirtying data from qemu uses a
@@ -188,10 +320,14 @@ main(int argc, char **argv)
     max_f = atoi(argv[4]);
     flags = atoi(argv[5]);
 
+    suspend_evtchn_init(xc_fd, domid);
+
     ret = xc_domain_save(xc_fd, io_fd, domid, maxit, max_f, flags,
                          &suspend, !!(flags & XCFLAGS_HVM), &init_qemu_maps,
                          &qemu_flip_buffer);
 
+    suspend_evtchn_release(xc_fd, domid);
+
     xc_interface_close(xc_fd);
 
     return ret;
--
cgit v1.2.3
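
The suspend_evtchn_init() hunk above keys off a single xenstore node,
/local/domain/<domid>/device/suspend/event-channel, which a capable guest
writes at boot. The following sketch is not part of the patch (the helper
name probe_suspend_port is made up); it only shows how a caller could read
that node with the same libxenstore calls the patch uses, to learn in
advance whether the event-channel fast path is available:

/* Hypothetical helper, not part of the patch: return the suspend event
 * channel port a guest advertises in xenstore, or -1 if none.  Uses the
 * same xenstore path and libxenstore calls as suspend_evtchn_init(). */
#include <stdio.h>
#include <stdlib.h>
#include <xs.h>

static int probe_suspend_port(int domid)
{
    struct xs_handle *xs;
    char path[128];
    char *portstr;
    unsigned int plen;
    int port = -1;

    xs = xs_daemon_open();
    if (!xs)
        return -1;

    snprintf(path, sizeof(path),
             "/local/domain/%d/device/suspend/event-channel", domid);
    portstr = xs_read(xs, XBT_NULL, path, &plen);
    xs_daemon_close(xs);

    if (portstr && plen)
        port = atoi(portstr);
    free(portstr);

    return port;
}

A return value of -1 corresponds to the case in which xc_save falls back to
compat_suspend() and the old stdout/stdin handshake with xend.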
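
evtchn_suspend() blocks in xc_evtchn_pending() until the guest acknowledges
on the notification port; the descriptor fetched with xc_evtchn_fd() is not
otherwise used. A possible variant, sketched below under the assumption
that the si structure from the patch is in scope (the function name
wait_suspended and the timeout are illustrative, not from the patch), would
select() on that descriptor so a hung guest cannot stall the save forever:

/* Hypothetical variant, not part of the patch: wait for the "suspended"
 * notification with a timeout instead of blocking indefinitely. */
#include <sys/select.h>
#include <xenctrl.h>

static struct suspendinfo {
    int xce;
    int suspend_evtchn;
    int suspended_evtchn;
} si; /* same layout as the structure added by the patch */

static int wait_suspended(int timeout_secs)
{
    int fd = xc_evtchn_fd(si.xce);
    struct timeval tv = { .tv_sec = timeout_secs, .tv_usec = 0 };
    fd_set rfds;
    int port;

    for (;;) {
        FD_ZERO(&rfds);
        FD_SET(fd, &rfds);
        if (select(fd + 1, &rfds, NULL, NULL, &tv) <= 0)
            return -1;                     /* timeout or select error */

        port = xc_evtchn_pending(si.xce);  /* will not block: fd is readable */
        if (port < 0)
            return -1;
        if (port == si.suspended_evtchn) {
            xc_evtchn_unmask(si.xce, si.suspended_evtchn);
            return 0;                      /* guest acknowledged suspend */
        }
        /* notification on some other port; keep waiting */
    }
}

On failure the caller could still fall back to the compat path, mirroring
the dispatch already done by the suspend() wrapper in the patch.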