Mailing List Archive

[PATCH] Fix save/restore for HVM domains with viridian=1
# HG changeset patch
# User Paul Durrant <paul.durrant@citrix.com>
# Date 1322232651 0
# Node ID a3c5e87c73a991861fcdd9a5a1eb8ebc635a2d09
# Parent 0a0c02a616768bfab16c072788cb76be1893c37f
Fix save/restore for HVM domains with viridian=1

xc_domain_save/restore currently pay no attention to HVM_PARAM_VIRIDIAN which
results in an HVM domain running a recent version of Windows (post-Vista)
locking up on a domain restore due to EOIs (done via a viridian MSR write)
being silently dropped.
This patch adds an extra save entry for the viridian parameter and also
adds code in the viridian kernel module to catch attempted use of viridian
functionality when the HVM parameter has not been set.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
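
For context on the 4-byte skip in the restore hunk below: each optional record
that xc_domain_save emits is a small id-plus-payload chunk, laid out on the wire
as a 32-bit XC_SAVE_ID_* marker, 4 bytes of padding, then a 64-bit payload. The
struct below is only an illustrative sketch of that layout (the name and fields
are not the literal libxc declaration):

    #include <stdint.h>

    /* Sketch of the optional save-record layout, for illustration only. */
    struct opt_save_record {
        int32_t  id;    /* negative marker, e.g. XC_SAVE_ID_HVM_VIRIDIAN (-11) */
        uint32_t pad;   /* the 4 bytes pagebuf_get_one() reads and discards    */
        uint64_t data;  /* the payload, e.g. the HVM_PARAM_VIRIDIAN value      */
    };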

diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c Mon Nov 21 21:28:34 2011 +0000
+++ b/tools/libxc/xc_domain_restore.c Fri Nov 25 14:50:51 2011 +0000
@@ -675,6 +675,7 @@ typedef struct {
uint64_t vm86_tss;
uint64_t console_pfn;
uint64_t acpi_ioport_location;
+ uint64_t viridian;
} pagebuf_t;

static int pagebuf_init(pagebuf_t* buf)
@@ -809,6 +810,16 @@ static int pagebuf_get_one(xc_interface
}
return pagebuf_get_one(xch, ctx, buf, fd, dom);

+ case XC_SAVE_ID_HVM_VIRIDIAN:
+ /* Skip padding 4 bytes then read the acpi ioport location. */
+ if ( RDEXACT(fd, &buf->viridian, sizeof(uint32_t)) ||
+ RDEXACT(fd, &buf->viridian, sizeof(uint64_t)) )
+ {
+ PERROR("error read the viridian flag");
+ return -1;
+ }
+ return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
default:
if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
ERROR("Max batch size exceeded (%d). Giving up.", count);
@@ -1440,6 +1451,9 @@ int xc_domain_restore(xc_interface *xch,
fcntl(io_fd, F_SETFL, orig_io_fd_flags | O_NONBLOCK);
}

+ if (pagebuf.viridian != 0)
+ xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
+
if (pagebuf.acpi_ioport_location == 1) {
DBGPRINTF("Use new firmware ioport from the checkpoint\n");
xc_set_hvm_param(xch, dom, HVM_PARAM_ACPI_IOPORTS_LOCATION, 1);
diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c Mon Nov 21 21:28:34 2011 +0000
+++ b/tools/libxc/xc_domain_save.c Fri Nov 25 14:50:51 2011 +0000
@@ -1506,6 +1506,18 @@ int xc_domain_save(xc_interface *xch, in
PERROR("Error when writing the firmware ioport version");
goto out;
}
+
+ chunk.id = XC_SAVE_ID_HVM_VIRIDIAN;
+ chunk.data = 0;
+ xc_get_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN,
+ (unsigned long *)&chunk.data);
+
+ if ( (chunk.data != 0) &&
+ wrexact(io_fd, &chunk, sizeof(chunk)) )
+ {
+ PERROR("Error when writing the viridian flag");
+ goto out;
+ }
}

if ( !callbacks->checkpoint )
diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h Mon Nov 21 21:28:34 2011 +0000
+++ b/tools/libxc/xg_save_restore.h Fri Nov 25 14:50:51 2011 +0000
@@ -134,6 +134,7 @@
#define XC_SAVE_ID_HVM_CONSOLE_PFN -8 /* (HVM-only) */
#define XC_SAVE_ID_LAST_CHECKPOINT -9 /* Commit to restoring after completion of current iteration. */
#define XC_SAVE_ID_HVM_ACPI_IOPORTS_LOCATION -10
+#define XC_SAVE_ID_HVM_VIRIDIAN -11

/*
** We process save/restore/migrate in batches of pages; the below
diff -r 0a0c02a61676 -r a3c5e87c73a9 xen/arch/x86/hvm/viridian.c
--- a/xen/arch/x86/hvm/viridian.c Mon Nov 21 21:28:34 2011 +0000
+++ b/xen/arch/x86/hvm/viridian.c Fri Nov 25 14:50:51 2011 +0000
@@ -206,8 +206,11 @@ int wrmsr_viridian_regs(uint32_t idx, ui
struct vcpu *v = current;
struct domain *d = v->domain;

- if ( !is_viridian_domain(d) )
+ if ( !is_viridian_domain(d) ) {
+ gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n", __func__,
+ d->domain_id);
return 0;
+ }

switch ( idx )
{
@@ -271,8 +274,11 @@ int rdmsr_viridian_regs(uint32_t idx, ui
struct vcpu *v = current;
struct domain *d = v->domain;

- if ( !is_viridian_domain(d) )
+ if ( !is_viridian_domain(d) ) {
+ gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n", __func__,
+ d->domain_id);
return 0;
+ }

switch ( idx )
{
@@ -411,6 +417,8 @@ static int viridian_load_domain_ctxt(str
if ( hvm_load_entry(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
return -EINVAL;

+ ASSERT(is_viridian_domain(d));
+
d->arch.hvm_domain.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
d->arch.hvm_domain.viridian.guest_os_id.raw = ctxt.guest_os_id;

@@ -455,6 +463,8 @@ static int viridian_load_vcpu_ctxt(struc
if ( hvm_load_entry(VIRIDIAN_VCPU, h, &ctxt) != 0 )
return -EINVAL;

+ ASSERT(is_viridian_domain(d));
+
v->arch.hvm_vcpu.viridian.apic_assist.raw = ctxt.apic_assist;

return 0;

Re: [PATCH] Fix save/restore for HVM domains with viridian=1
>>> On 25.11.11 at 15:50, Paul Durrant <paul.durrant@citrix.com> wrote:
> # HG changeset patch
> # User Paul Durrant <paul.durrant@citrix.com>
> # Date 1322232651 0
> # Node ID a3c5e87c73a991861fcdd9a5a1eb8ebc635a2d09
> # Parent 0a0c02a616768bfab16c072788cb76be1893c37f
> Fix save/restore for HVM domains with viridian=1
>
> xc_domain_save/restore currently pay no attention to HVM_PARAM_VIRIDIAN
> which
> results in an HVM domain running a recent version of Windows (post-Vista)
> locking up on a domain restore due to EOIs (done via a viridian MSR write)
> being silently dropped.
> This patch adds an extra save entry for the viridian parameter and also
> adds code in the viridian kernel module to catch attempted use of viridian
> functionality when the HVM parameter has not been set.

Assuming this means the changes to {rd,wr}msr_viridian_regs(), isn't
this going to needlessly spam the log? I.e. printing this just once (per
VM) would seem to suffice. Or alternatively I would think that
HVM_DBG_LOG() would be a better choice here.
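
A minimal sketch of the print-once-per-VM variant might look like the following;
the warned_not_viridian field is hypothetical (no such flag exists in the
viridian state today) and is only meant to illustrate the idea:

    /* Sketch: warn only on the first offending access per domain.     */
    /* The warned_not_viridian flag is hypothetical, for illustration. */
    if ( !is_viridian_domain(d) )
    {
        if ( !d->arch.hvm_domain.viridian.warned_not_viridian )
        {
            d->arch.hvm_domain.viridian.warned_not_viridian = 1;
            gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n",
                     __func__, d->domain_id);
        }
        return 0;
    }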

Jan

> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
>
> diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xc_domain_restore.c
> --- a/tools/libxc/xc_domain_restore.c Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xc_domain_restore.c Fri Nov 25 14:50:51 2011 +0000
> @@ -675,6 +675,7 @@ typedef struct {
> uint64_t vm86_tss;
> uint64_t console_pfn;
> uint64_t acpi_ioport_location;
> + uint64_t viridian;
> } pagebuf_t;
>
> static int pagebuf_init(pagebuf_t* buf)
> @@ -809,6 +810,16 @@ static int pagebuf_get_one(xc_interface
> }
> return pagebuf_get_one(xch, ctx, buf, fd, dom);
>
> + case XC_SAVE_ID_HVM_VIRIDIAN:
> + /* Skip padding 4 bytes then read the acpi ioport location. */
> + if ( RDEXACT(fd, &buf->viridian, sizeof(uint32_t)) ||
> + RDEXACT(fd, &buf->viridian, sizeof(uint64_t)) )
> + {
> + PERROR("error read the viridian flag");
> + return -1;
> + }
> + return pagebuf_get_one(xch, ctx, buf, fd, dom);
> +
> default:
> if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
> ERROR("Max batch size exceeded (%d). Giving up.", count);
> @@ -1440,6 +1451,9 @@ int xc_domain_restore(xc_interface *xch,
> fcntl(io_fd, F_SETFL, orig_io_fd_flags | O_NONBLOCK);
> }
>
> + if (pagebuf.viridian != 0)
> + xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
> +
> if (pagebuf.acpi_ioport_location == 1) {
> DBGPRINTF("Use new firmware ioport from the checkpoint\n");
> xc_set_hvm_param(xch, dom, HVM_PARAM_ACPI_IOPORTS_LOCATION, 1);
> diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xc_domain_save.c
> --- a/tools/libxc/xc_domain_save.c Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xc_domain_save.c Fri Nov 25 14:50:51 2011 +0000
> @@ -1506,6 +1506,18 @@ int xc_domain_save(xc_interface *xch, in
> PERROR("Error when writing the firmware ioport version");
> goto out;
> }
> +
> + chunk.id = XC_SAVE_ID_HVM_VIRIDIAN;
> + chunk.data = 0;
> + xc_get_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN,
> + (unsigned long *)&chunk.data);
> +
> + if ( (chunk.data != 0) &&
> + wrexact(io_fd, &chunk, sizeof(chunk)) )
> + {
> + PERROR("Error when writing the viridian flag");
> + goto out;
> + }
> }
>
> if ( !callbacks->checkpoint )
> diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xg_save_restore.h
> --- a/tools/libxc/xg_save_restore.h Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xg_save_restore.h Fri Nov 25 14:50:51 2011 +0000
> @@ -134,6 +134,7 @@
> #define XC_SAVE_ID_HVM_CONSOLE_PFN -8 /* (HVM-only) */
> #define XC_SAVE_ID_LAST_CHECKPOINT -9 /* Commit to restoring after
> completion of current iteration. */
> #define XC_SAVE_ID_HVM_ACPI_IOPORTS_LOCATION -10
> +#define XC_SAVE_ID_HVM_VIRIDIAN -11
>
> /*
> ** We process save/restore/migrate in batches of pages; the below
> diff -r 0a0c02a61676 -r a3c5e87c73a9 xen/arch/x86/hvm/viridian.c
> --- a/xen/arch/x86/hvm/viridian.c Mon Nov 21 21:28:34 2011 +0000
> +++ b/xen/arch/x86/hvm/viridian.c Fri Nov 25 14:50:51 2011 +0000
> @@ -206,8 +206,11 @@ int wrmsr_viridian_regs(uint32_t idx, ui
> struct vcpu *v = current;
> struct domain *d = v->domain;
>
> - if ( !is_viridian_domain(d) )
> + if ( !is_viridian_domain(d) ) {
> + gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n",
> __func__,
> + d->domain_id);
> return 0;
> + }
>
> switch ( idx )
> {
> @@ -271,8 +274,11 @@ int rdmsr_viridian_regs(uint32_t idx, ui
> struct vcpu *v = current;
> struct domain *d = v->domain;
>
> - if ( !is_viridian_domain(d) )
> + if ( !is_viridian_domain(d) ) {
> + gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n",
> __func__,
> + d->domain_id);
> return 0;
> + }
>
> switch ( idx )
> {
> @@ -411,6 +417,8 @@ static int viridian_load_domain_ctxt(str
> if ( hvm_load_entry(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
> return -EINVAL;
>
> + ASSERT(is_viridian_domain(d));
> +
> d->arch.hvm_domain.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
> d->arch.hvm_domain.viridian.guest_os_id.raw = ctxt.guest_os_id;
>
> @@ -455,6 +463,8 @@ static int viridian_load_vcpu_ctxt(struc
> if ( hvm_load_entry(VIRIDIAN_VCPU, h, &ctxt) != 0 )
> return -EINVAL;
>
> + ASSERT(is_viridian_domain(d));
> +
> v->arch.hvm_vcpu.viridian.apic_assist.raw = ctxt.apic_assist;
>
> return 0;
>



Re: [PATCH] Fix save/restore for HVM domains with viridian=1
At 14:50 +0000 on 25 Nov (1322232658), Paul Durrant wrote:
> # HG changeset patch
> # User Paul Durrant <paul.durrant@citrix.com>
> # Date 1322232651 0
> # Node ID a3c5e87c73a991861fcdd9a5a1eb8ebc635a2d09
> # Parent 0a0c02a616768bfab16c072788cb76be1893c37f
> Fix save/restore for HVM domains with viridian=1
>
> xc_domain_save/restore currently pay no attention to HVM_PARAM_VIRIDIAN which
> results in an HVM domain running a recent version of Windows (post-Vista)
> locking up on a domain restore due to EOIs (done via a viridian MSR write)
> being silently dropped.
> This patch adds an extra save entry for the viridian parameter and also
> adds code in the viridian kernel module to catch attempted use of viridian
> functionality when the HVM parameter has not been set.
>
> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
>
> diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xc_domain_restore.c
> --- a/tools/libxc/xc_domain_restore.c Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xc_domain_restore.c Fri Nov 25 14:50:51 2011 +0000
> @@ -675,6 +675,7 @@ typedef struct {
> uint64_t vm86_tss;
> uint64_t console_pfn;
> uint64_t acpi_ioport_location;
> + uint64_t viridian;
> } pagebuf_t;
>
> static int pagebuf_init(pagebuf_t* buf)
> @@ -809,6 +810,16 @@ static int pagebuf_get_one(xc_interface
> }
> return pagebuf_get_one(xch, ctx, buf, fd, dom);
>
> + case XC_SAVE_ID_HVM_VIRIDIAN:
> + /* Skip padding 4 bytes then read the acpi ioport location. */

Ahem. ^^^^^^^^^^^^^^^^^^^^

> + if ( RDEXACT(fd, &buf->viridian, sizeof(uint32_t)) ||
> + RDEXACT(fd, &buf->viridian, sizeof(uint64_t)) )
> + {
> + PERROR("error read the viridian flag");
> + return -1;
> + }
> + return pagebuf_get_one(xch, ctx, buf, fd, dom);
> +
> default:
> if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
> ERROR("Max batch size exceeded (%d). Giving up.", count);
> @@ -1440,6 +1451,9 @@ int xc_domain_restore(xc_interface *xch,
> fcntl(io_fd, F_SETFL, orig_io_fd_flags | O_NONBLOCK);
> }
>
> + if (pagebuf.viridian != 0)
> + xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
> +
> if (pagebuf.acpi_ioport_location == 1) {
> DBGPRINTF("Use new firmware ioport from the checkpoint\n");
> xc_set_hvm_param(xch, dom, HVM_PARAM_ACPI_IOPORTS_LOCATION, 1);
> diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xc_domain_save.c
> --- a/tools/libxc/xc_domain_save.c Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xc_domain_save.c Fri Nov 25 14:50:51 2011 +0000
> @@ -1506,6 +1506,18 @@ int xc_domain_save(xc_interface *xch, in
> PERROR("Error when writing the firmware ioport version");
> goto out;
> }
> +
> + chunk.id = XC_SAVE_ID_HVM_VIRIDIAN;
> + chunk.data = 0;
> + xc_get_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN,
> + (unsigned long *)&chunk.data);
> +
> + if ( (chunk.data != 0) &&
> + wrexact(io_fd, &chunk, sizeof(chunk)) )
> + {
> + PERROR("Error when writing the viridian flag");
> + goto out;
> + }
> }
>
> if ( !callbacks->checkpoint )
> diff -r 0a0c02a61676 -r a3c5e87c73a9 tools/libxc/xg_save_restore.h
> --- a/tools/libxc/xg_save_restore.h Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xg_save_restore.h Fri Nov 25 14:50:51 2011 +0000
> @@ -134,6 +134,7 @@
> #define XC_SAVE_ID_HVM_CONSOLE_PFN -8 /* (HVM-only) */
> #define XC_SAVE_ID_LAST_CHECKPOINT -9 /* Commit to restoring after completion of current iteration. */
> #define XC_SAVE_ID_HVM_ACPI_IOPORTS_LOCATION -10
> +#define XC_SAVE_ID_HVM_VIRIDIAN -11
>
> /*
> ** We process save/restore/migrate in batches of pages; the below
> diff -r 0a0c02a61676 -r a3c5e87c73a9 xen/arch/x86/hvm/viridian.c
> --- a/xen/arch/x86/hvm/viridian.c Mon Nov 21 21:28:34 2011 +0000
> +++ b/xen/arch/x86/hvm/viridian.c Fri Nov 25 14:50:51 2011 +0000
> @@ -206,8 +206,11 @@ int wrmsr_viridian_regs(uint32_t idx, ui
> struct vcpu *v = current;
> struct domain *d = v->domain;
>
> - if ( !is_viridian_domain(d) )
> + if ( !is_viridian_domain(d) ) {
> + gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n", __func__,
> + d->domain_id);

This should probably be rate-limited; a confused domain could cause a
_lot_ of these. Might we want to pause/kill the domain as well, or
inject a fault?

> return 0;
> + }
>
> switch ( idx )
> {
> @@ -271,8 +274,11 @@ int rdmsr_viridian_regs(uint32_t idx, ui
> struct vcpu *v = current;
> struct domain *d = v->domain;
>
> - if ( !is_viridian_domain(d) )
> + if ( !is_viridian_domain(d) ) {
> + gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n", __func__,
> + d->domain_id);

Likewise.

> return 0;
> + }
>
> switch ( idx )
> {
> @@ -411,6 +417,8 @@ static int viridian_load_domain_ctxt(str
> if ( hvm_load_entry(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
> return -EINVAL;
>
> + ASSERT(is_viridian_domain(d));
> +

I don't think it's appropriate to crash Xen if the save file is bogus.

> d->arch.hvm_domain.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
> d->arch.hvm_domain.viridian.guest_os_id.raw = ctxt.guest_os_id;
>
> @@ -455,6 +463,8 @@ static int viridian_load_vcpu_ctxt(struc
> if ( hvm_load_entry(VIRIDIAN_VCPU, h, &ctxt) != 0 )
> return -EINVAL;
>
> + ASSERT(is_viridian_domain(d));
> +

Likewise.

Tim.

Re: [PATCH] Fix save/restore for HVM domains with viridian=1
> -----Original Message-----
> From: Tim Deegan [mailto:tim@xen.org]
> Sent: 25 November 2011 15:01
> To: Paul Durrant
> Cc: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH] Fix save/restore for HVM domains
> with viridian=1
>
> At 14:50 +0000 on 25 Nov (1322232658), Paul Durrant wrote:
[snip]
> >
> > + case XC_SAVE_ID_HVM_VIRIDIAN:
> > + /* Skip padding 4 bytes then read the acpi ioport
> location.
> > + */
>
> Ahem.
> ^^^^^^^^^^^^^^^^^^^^
>

Yes, whoops.

[snip]
> > @@ -206,8 +206,11 @@ int wrmsr_viridian_regs(uint32_t idx, ui
> > struct vcpu *v = current;
> > struct domain *d = v->domain;
> >
> > - if ( !is_viridian_domain(d) )
> > + if ( !is_viridian_domain(d) ) {
> > + gdprintk(XENLOG_WARNING, "%s: %d not a viridian
> domain\n", __func__,
> > + d->domain_id);
>
> This should probably be rate-limited; a confused domain could cause
> a _lot_ of these. Might we want to pause/kill the domain as well,
> or inject a fault?
>

Yes, true.

> > return 0;
> > + }
> >
> > switch ( idx )
> > {
> > @@ -271,8 +274,11 @@ int rdmsr_viridian_regs(uint32_t idx, ui
> > struct vcpu *v = current;
> > struct domain *d = v->domain;
> >
> > - if ( !is_viridian_domain(d) )
> > + if ( !is_viridian_domain(d) ) {
> > + gdprintk(XENLOG_WARNING, "%s: %d not a viridian
> domain\n", __func__,
> > + d->domain_id);
>
> Likewise.
>
> > return 0;
> > + }
> >
> > switch ( idx )
> > {
> > @@ -411,6 +417,8 @@ static int viridian_load_domain_ctxt(str
> > if ( hvm_load_entry(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
> > return -EINVAL;
> >
> > + ASSERT(is_viridian_domain(d));
> > +
>
> I don't think it's appropriate to crash Xen if the save file is
> bogus.
>

There's a similar ASSERT in the hypercall function anyway; would you rather I turned that into a rate-limited warning too?

Paul

Re: [PATCH] Fix save/restore for HVM domains with viridian=1
On 25/11/2011 14:58, "Jan Beulich" <JBeulich@suse.com> wrote:

>> xc_domain_save/restore currently pay no attention to HVM_PARAM_VIRIDIAN
>> which
>> results in an HVM domain running a recent version of Windows (post-Vista)
>> locking up on a domain restore due to EOIs (done via a viridian MSR write)
>> being silently dropped.
>> This patch adds an extra save entry for the viridian parameter and also
>> adds code in the viridian kernel module to catch attempted use of viridian
>> functionality when the HVM parameter has not been set.
>
> Assuming this means the changes to {rd,wr}msr_viridian_regs(), isn't
> this going to needlessly spam the log? I.e. printing this just once (per
> VM) would seem to suffice. Or alternatively I would think that
> HVM_DBG_LOG() would be a better choice here.

It's unlikely to print more than once before the VM wedges. We could go
further and inject #GP, which we ought to be doing anyway.

-- Keir



Re: [PATCH] Fix save/restore for HVM domains with viridian=1
On 25/11/2011 15:00, "Tim Deegan" <tim@xen.org> wrote:

>> - if ( !is_viridian_domain(d) )
>> + if ( !is_viridian_domain(d) ) {
>> + gdprintk(XENLOG_WARNING, "%s: %d not a viridian domain\n", __func__,
>> + d->domain_id);
>
> This should probably be rate-limited; a confused domain could cause a
> _lot_ of these. Might we want to pause/kill the domain as well, or
> inject a fault?

We do XENLOG_WARNING logging on various paths that silently fail (e.g.,
weird writes to the VIOAPIC). We should probably just #GP; that will silence
the guest pretty quickly. I can make that change.
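
A sketch of what injecting #GP at that point might look like is below; it
assumes the hvm_inject_exception() helper present in trees of this era (the
exact injection call and return convention are assumptions, not taken from any
posted patch):

    /* Sketch only: fault the guest instead of silently dropping the access. */
    if ( !is_viridian_domain(d) )
    {
        hvm_inject_exception(TRAP_gp_fault, 0, 0);
        return 1; /* report the MSR access as handled (by faulting) */
    }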

-- Keir



Re: [PATCH] Fix save/restore for HVM domains with viridian=1
At 15:14 +0000 on 25 Nov (1322234058), Paul Durrant wrote:
> > > @@ -411,6 +417,8 @@ static int viridian_load_domain_ctxt(str
> > > if ( hvm_load_entry(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
> > > return -EINVAL;
> > >
> > > + ASSERT(is_viridian_domain(d));
> > > +
> >
> > I don't think it's appropriate to crash Xen if the save file is
> > bogus.
> >
>
> There's a similar ASSERT in the hypercall function anyway; would you rather I turned that into a rate limited warning too?
>

If you mean the one in viridian_hypercall(), no - if that function is
called for a non-viridian VM that's a bug in Xen so the ASSERT() is
correct.

The viridian_load_*_ctxt() functions are called based on the HVM save
record the tools gave us, so we should at most return an error if they
lead us astray. And I think it should be OK to load the HVM state and then
set the HVM params, so the current (lack of) response is correct.
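
For reference, the "at most return an error" shape would look roughly like the
sketch below (and, per the point above, even this check is arguably unnecessary
since the params can legitimately be set after the HVM state is loaded):

    /* Sketch only: reject the record rather than ASSERT, if a check were
     * wanted at all. */
    if ( hvm_load_entry(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
        return -EINVAL;

    if ( !is_viridian_domain(d) )
    {
        gdprintk(XENLOG_WARNING,
                 "%s: viridian restore record for non-viridian domain %d\n",
                 __func__, d->domain_id);
        return -EINVAL;
    }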

Cheers,

Tim.

Re: [PATCH] Fix save/restore for HVM domains with viridian=1
Yes, I forgot about #GP. Windows 7 unfortunately handles it but doesn't complain. I'll ditch the logging.

Paul

> -----Original Message-----
> From: Keir Fraser [mailto:keir.xen@gmail.com]
> Sent: 25 November 2011 15:18
> To: Tim (Xen.org); Paul Durrant
> Cc: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH] Fix save/restore for HVM domains
> with viridian=1
>
> On 25/11/2011 15:00, "Tim Deegan" <tim@xen.org> wrote:
>
> >> - if ( !is_viridian_domain(d) )
> >> + if ( !is_viridian_domain(d) ) {
> >> + gdprintk(XENLOG_WARNING, "%s: %d not a viridian
> domain\n", __func__,
> >> + d->domain_id);
> >
> > This should probably be rate-limited; a confused domain could
> cause a
> > _lot_ of these. Might we want to pause/kill the domain as well,
> or
> > inject a fault?
>
> We do XENLOG_WARNING logging on various paths that silently fail
> (e.g., weird writes to the VIOAPIC). We should probably just #GP,
> that will silence the guest pretty quickly. I can make that change.
>
> -- Keir
>


[PATCH] Fix save/restore for HVM domains with viridian=1
# HG changeset patch
# User Paul Durrant <paul.durrant@citrix.com>
# Date 1322234894 0
# Node ID b50479d64deea72b11008fd444aa951737b2573d
# Parent 0a0c02a616768bfab16c072788cb76be1893c37f
Fix save/restore for HVM domains with viridian=1

xc_domain_save/restore currently pay no attention to HVM_PARAM_VIRIDIAN which
results in an HVM domain running a recent version of Windows (post-Vista)
locking up on a domain restore due to EOIs (done via a viridian MSR write)
being silently dropped.
This patch adds an extra save entry for the viridian parameter and also
adds code in the viridian kernel module to catch attempted use of viridian
functionality when the HVM parameter has not been set.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>

diff -r 0a0c02a61676 -r b50479d64dee tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c Mon Nov 21 21:28:34 2011 +0000
+++ b/tools/libxc/xc_domain_restore.c Fri Nov 25 15:28:14 2011 +0000
@@ -675,6 +675,7 @@ typedef struct {
uint64_t vm86_tss;
uint64_t console_pfn;
uint64_t acpi_ioport_location;
+ uint64_t viridian;
} pagebuf_t;

static int pagebuf_init(pagebuf_t* buf)
@@ -809,6 +810,16 @@ static int pagebuf_get_one(xc_interface
}
return pagebuf_get_one(xch, ctx, buf, fd, dom);

+ case XC_SAVE_ID_HVM_VIRIDIAN:
+ /* Skip padding 4 bytes then read the viridian flag. */
+ if ( RDEXACT(fd, &buf->viridian, sizeof(uint32_t)) ||
+ RDEXACT(fd, &buf->viridian, sizeof(uint64_t)) )
+ {
+ PERROR("error read the viridian flag");
+ return -1;
+ }
+ return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
default:
if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
ERROR("Max batch size exceeded (%d). Giving up.", count);
@@ -1440,6 +1451,9 @@ int xc_domain_restore(xc_interface *xch,
fcntl(io_fd, F_SETFL, orig_io_fd_flags | O_NONBLOCK);
}

+ if (pagebuf.viridian != 0)
+ xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
+
if (pagebuf.acpi_ioport_location == 1) {
DBGPRINTF("Use new firmware ioport from the checkpoint\n");
xc_set_hvm_param(xch, dom, HVM_PARAM_ACPI_IOPORTS_LOCATION, 1);
diff -r 0a0c02a61676 -r b50479d64dee tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c Mon Nov 21 21:28:34 2011 +0000
+++ b/tools/libxc/xc_domain_save.c Fri Nov 25 15:28:14 2011 +0000
@@ -1506,6 +1506,18 @@ int xc_domain_save(xc_interface *xch, in
PERROR("Error when writing the firmware ioport version");
goto out;
}
+
+ chunk.id = XC_SAVE_ID_HVM_VIRIDIAN;
+ chunk.data = 0;
+ xc_get_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN,
+ (unsigned long *)&chunk.data);
+
+ if ( (chunk.data != 0) &&
+ wrexact(io_fd, &chunk, sizeof(chunk)) )
+ {
+ PERROR("Error when writing the viridian flag");
+ goto out;
+ }
}

if ( !callbacks->checkpoint )
diff -r 0a0c02a61676 -r b50479d64dee tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h Mon Nov 21 21:28:34 2011 +0000
+++ b/tools/libxc/xg_save_restore.h Fri Nov 25 15:28:14 2011 +0000
@@ -134,6 +134,7 @@
#define XC_SAVE_ID_HVM_CONSOLE_PFN -8 /* (HVM-only) */
#define XC_SAVE_ID_LAST_CHECKPOINT -9 /* Commit to restoring after completion of current iteration. */
#define XC_SAVE_ID_HVM_ACPI_IOPORTS_LOCATION -10
+#define XC_SAVE_ID_HVM_VIRIDIAN -11

/*
** We process save/restore/migrate in batches of pages; the below

Re: [PATCH] Fix save/restore for HVM domains with viridian=1
Sorry, ignore this one. I need to fix the comment.

Paul

> -----Original Message-----
> From: Paul Durrant [mailto:paul.durrant@citrix.com]
> Sent: 25 November 2011 15:29
> To: xen-devel@lists.xensource.com
> Cc: Paul Durrant
> Subject: [PATCH] Fix save/restore for HVM domains with viridian=1
>
> # HG changeset patch
> # User Paul Durrant <paul.durrant@citrix.com> # Date 1322234894 0 #
> Node ID b50479d64deea72b11008fd444aa951737b2573d
> # Parent 0a0c02a616768bfab16c072788cb76be1893c37f
> Fix save/restore for HVM domains with viridian=1
>
> xc_domain_save/restore currently pay no attention to
> HVM_PARAM_VIRIDIAN which results in an HVM domain running a recent
> version of Windows (post-Vista) locking up on a domain restore due
> to EOIs (done via a viridian MSR write) being silently dropped.
> This patch adds an extra save entry for the viridian parameter and
> also adds code in the viridian kernel module to catch attempted use
> of viridian functionality when the HVM parameter has not been set.
>
> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
>
> diff -r 0a0c02a61676 -r b50479d64dee tools/libxc/xc_domain_restore.c
> --- a/tools/libxc/xc_domain_restore.c Mon Nov 21 21:28:34 2011
> +0000
> +++ b/tools/libxc/xc_domain_restore.c Fri Nov 25 15:28:14 2011
> +0000
> @@ -675,6 +675,7 @@ typedef struct {
> uint64_t vm86_tss;
> uint64_t console_pfn;
> uint64_t acpi_ioport_location;
> + uint64_t viridian;
> } pagebuf_t;
>
> static int pagebuf_init(pagebuf_t* buf) @@ -809,6 +810,16 @@ static
> int pagebuf_get_one(xc_interface
> }
> return pagebuf_get_one(xch, ctx, buf, fd, dom);
>
> + case XC_SAVE_ID_HVM_VIRIDIAN:
> + /* Skip padding 4 bytes then read the viridian flag. */
> + if ( RDEXACT(fd, &buf->viridian, sizeof(uint32_t)) ||
> + RDEXACT(fd, &buf->viridian, sizeof(uint64_t)) )
> + {
> + PERROR("error read the viridian flag");
> + return -1;
> + }
> + return pagebuf_get_one(xch, ctx, buf, fd, dom);
> +
> default:
> if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
> ERROR("Max batch size exceeded (%d). Giving up.",
> count); @@ -1440,6 +1451,9 @@ int xc_domain_restore(xc_interface
> *xch,
> fcntl(io_fd, F_SETFL, orig_io_fd_flags | O_NONBLOCK);
> }
>
> + if (pagebuf.viridian != 0)
> + xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
> +
> if (pagebuf.acpi_ioport_location == 1) {
> DBGPRINTF("Use new firmware ioport from the checkpoint\n");
> xc_set_hvm_param(xch, dom, HVM_PARAM_ACPI_IOPORTS_LOCATION,
> 1); diff -r 0a0c02a61676 -r b50479d64dee
> tools/libxc/xc_domain_save.c
> --- a/tools/libxc/xc_domain_save.c Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xc_domain_save.c Fri Nov 25 15:28:14 2011 +0000
> @@ -1506,6 +1506,18 @@ int xc_domain_save(xc_interface *xch, in
> PERROR("Error when writing the firmware ioport
> version");
> goto out;
> }
> +
> + chunk.id = XC_SAVE_ID_HVM_VIRIDIAN;
> + chunk.data = 0;
> + xc_get_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN,
> + (unsigned long *)&chunk.data);
> +
> + if ( (chunk.data != 0) &&
> + wrexact(io_fd, &chunk, sizeof(chunk)) )
> + {
> + PERROR("Error when writing the viridian flag");
> + goto out;
> + }
> }
>
> if ( !callbacks->checkpoint )
> diff -r 0a0c02a61676 -r b50479d64dee tools/libxc/xg_save_restore.h
> --- a/tools/libxc/xg_save_restore.h Mon Nov 21 21:28:34 2011
> +0000
> +++ b/tools/libxc/xg_save_restore.h Fri Nov 25 15:28:14 2011
> +0000
> @@ -134,6 +134,7 @@
> #define XC_SAVE_ID_HVM_CONSOLE_PFN -8 /* (HVM-only) */
> #define XC_SAVE_ID_LAST_CHECKPOINT -9 /* Commit to restoring
> after completion of current iteration. */
> #define XC_SAVE_ID_HVM_ACPI_IOPORTS_LOCATION -10
> +#define XC_SAVE_ID_HVM_VIRIDIAN -11
>
> /*
> ** We process save/restore/migrate in batches of pages; the below

[PATCH] Fix save/restore for HVM domains with viridian=1
# HG changeset patch
# User Paul Durrant <paul.durrant@citrix.com>
# Date 1322235040 0
# Node ID 2a8ef49f60cfe5a6c4c9f434af472ab58a125a7a
# Parent 0a0c02a616768bfab16c072788cb76be1893c37f
Fix save/restore for HVM domains with viridian=1

xc_domain_save/restore currently pay no attention to HVM_PARAM_VIRIDIAN which
results in an HVM domain running a recent version of Windows (post-Vista)
locking up on a domain restore due to EOIs (done via a viridian MSR write)
being silently dropped.
This patch adds an extra save entry for the viridian parameter.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>

diff -r 0a0c02a61676 -r 2a8ef49f60cf tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c Mon Nov 21 21:28:34 2011 +0000
+++ b/tools/libxc/xc_domain_restore.c Fri Nov 25 15:30:40 2011 +0000
@@ -675,6 +675,7 @@ typedef struct {
uint64_t vm86_tss;
uint64_t console_pfn;
uint64_t acpi_ioport_location;
+ uint64_t viridian;
} pagebuf_t;

static int pagebuf_init(pagebuf_t* buf)
@@ -809,6 +810,16 @@ static int pagebuf_get_one(xc_interface
}
return pagebuf_get_one(xch, ctx, buf, fd, dom);

+ case XC_SAVE_ID_HVM_VIRIDIAN:
+ /* Skip padding 4 bytes then read the viridian flag. */
+ if ( RDEXACT(fd, &buf->viridian, sizeof(uint32_t)) ||
+ RDEXACT(fd, &buf->viridian, sizeof(uint64_t)) )
+ {
+ PERROR("error read the viridian flag");
+ return -1;
+ }
+ return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
default:
if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
ERROR("Max batch size exceeded (%d). Giving up.", count);
@@ -1440,6 +1451,9 @@ int xc_domain_restore(xc_interface *xch,
fcntl(io_fd, F_SETFL, orig_io_fd_flags | O_NONBLOCK);
}

+ if (pagebuf.viridian != 0)
+ xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
+
if (pagebuf.acpi_ioport_location == 1) {
DBGPRINTF("Use new firmware ioport from the checkpoint\n");
xc_set_hvm_param(xch, dom, HVM_PARAM_ACPI_IOPORTS_LOCATION, 1);
diff -r 0a0c02a61676 -r 2a8ef49f60cf tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c Mon Nov 21 21:28:34 2011 +0000
+++ b/tools/libxc/xc_domain_save.c Fri Nov 25 15:30:40 2011 +0000
@@ -1506,6 +1506,18 @@ int xc_domain_save(xc_interface *xch, in
PERROR("Error when writing the firmware ioport version");
goto out;
}
+
+ chunk.id = XC_SAVE_ID_HVM_VIRIDIAN;
+ chunk.data = 0;
+ xc_get_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN,
+ (unsigned long *)&chunk.data);
+
+ if ( (chunk.data != 0) &&
+ wrexact(io_fd, &chunk, sizeof(chunk)) )
+ {
+ PERROR("Error when writing the viridian flag");
+ goto out;
+ }
}

if ( !callbacks->checkpoint )
diff -r 0a0c02a61676 -r 2a8ef49f60cf tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h Mon Nov 21 21:28:34 2011 +0000
+++ b/tools/libxc/xg_save_restore.h Fri Nov 25 15:30:40 2011 +0000
@@ -134,6 +134,7 @@
#define XC_SAVE_ID_HVM_CONSOLE_PFN -8 /* (HVM-only) */
#define XC_SAVE_ID_LAST_CHECKPOINT -9 /* Commit to restoring after completion of current iteration. */
#define XC_SAVE_ID_HVM_ACPI_IOPORTS_LOCATION -10
+#define XC_SAVE_ID_HVM_VIRIDIAN -11

/*
** We process save/restore/migrate in batches of pages; the below

Re: [PATCH] Fix save/restore for HVM domains with viridian=1
On 25/11/2011 15:30, "Paul Durrant" <paul.durrant@citrix.com> wrote:

> # HG changeset patch
> # User Paul Durrant <paul.durrant@citrix.com>
> # Date 1322235040 0
> # Node ID 2a8ef49f60cfe5a6c4c9f434af472ab58a125a7a
> # Parent 0a0c02a616768bfab16c072788cb76be1893c37f
> Fix save/restore for HVM domains with viridian=1
>
> xc_domain_save/restore currently pay no attention to HVM_PARAM_VIRIDIAN which
> results in an HVM domain running a recent version of Windows (post-Vista)
> locking up on a domain restore due to EOIs (done via a viridian MSR write)
> being silently dropped.
> This patch adds an extra save entry for the viridian parameter.

Ok, I did check in the previous one with the intention of fixing up the Xen
bits. Is the best fix now agreed to just remove the Xen bits? :-) I can
check in a patch to do that if so.

-- Keir

> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
>
> diff -r 0a0c02a61676 -r 2a8ef49f60cf tools/libxc/xc_domain_restore.c
> --- a/tools/libxc/xc_domain_restore.c Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xc_domain_restore.c Fri Nov 25 15:30:40 2011 +0000
> @@ -675,6 +675,7 @@ typedef struct {
> uint64_t vm86_tss;
> uint64_t console_pfn;
> uint64_t acpi_ioport_location;
> + uint64_t viridian;
> } pagebuf_t;
>
> static int pagebuf_init(pagebuf_t* buf)
> @@ -809,6 +810,16 @@ static int pagebuf_get_one(xc_interface
> }
> return pagebuf_get_one(xch, ctx, buf, fd, dom);
>
> + case XC_SAVE_ID_HVM_VIRIDIAN:
> + /* Skip padding 4 bytes then read the viridian flag. */
> + if ( RDEXACT(fd, &buf->viridian, sizeof(uint32_t)) ||
> + RDEXACT(fd, &buf->viridian, sizeof(uint64_t)) )
> + {
> + PERROR("error read the viridian flag");
> + return -1;
> + }
> + return pagebuf_get_one(xch, ctx, buf, fd, dom);
> +
> default:
> if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
> ERROR("Max batch size exceeded (%d). Giving up.", count);
> @@ -1440,6 +1451,9 @@ int xc_domain_restore(xc_interface *xch,
> fcntl(io_fd, F_SETFL, orig_io_fd_flags | O_NONBLOCK);
> }
>
> + if (pagebuf.viridian != 0)
> + xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
> +
> if (pagebuf.acpi_ioport_location == 1) {
> DBGPRINTF("Use new firmware ioport from the checkpoint\n");
> xc_set_hvm_param(xch, dom, HVM_PARAM_ACPI_IOPORTS_LOCATION, 1);
> diff -r 0a0c02a61676 -r 2a8ef49f60cf tools/libxc/xc_domain_save.c
> --- a/tools/libxc/xc_domain_save.c Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xc_domain_save.c Fri Nov 25 15:30:40 2011 +0000
> @@ -1506,6 +1506,18 @@ int xc_domain_save(xc_interface *xch, in
> PERROR("Error when writing the firmware ioport version");
> goto out;
> }
> +
> + chunk.id = XC_SAVE_ID_HVM_VIRIDIAN;
> + chunk.data = 0;
> + xc_get_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN,
> + (unsigned long *)&chunk.data);
> +
> + if ( (chunk.data != 0) &&
> + wrexact(io_fd, &chunk, sizeof(chunk)) )
> + {
> + PERROR("Error when writing the viridian flag");
> + goto out;
> + }
> }
>
> if ( !callbacks->checkpoint )
> diff -r 0a0c02a61676 -r 2a8ef49f60cf tools/libxc/xg_save_restore.h
> --- a/tools/libxc/xg_save_restore.h Mon Nov 21 21:28:34 2011 +0000
> +++ b/tools/libxc/xg_save_restore.h Fri Nov 25 15:30:40 2011 +0000
> @@ -134,6 +134,7 @@
> #define XC_SAVE_ID_HVM_CONSOLE_PFN -8 /* (HVM-only) */
> #define XC_SAVE_ID_LAST_CHECKPOINT -9 /* Commit to restoring after
> completion of current iteration. */
> #define XC_SAVE_ID_HVM_ACPI_IOPORTS_LOCATION -10
> +#define XC_SAVE_ID_HVM_VIRIDIAN -11
>
> /*
> ** We process save/restore/migrate in batches of pages; the below
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel



Re: [PATCH] Fix save/restore for HVM domains with viridian=1
Keir,

Yes, just ditch the kernel change.

Paul

> -----Original Message-----
> From: Keir Fraser [mailto:keir.xen@gmail.com]
> Sent: 25 November 2011 15:40
> To: Paul Durrant; xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH] Fix save/restore for HVM domains
> with viridian=1
>
> On 25/11/2011 15:30, "Paul Durrant" <paul.durrant@citrix.com> wrote:
>
> #
> > Node ID 2a8ef49f60cfe5a6c4c9f434af472ab58a125a7a
> > # Parent 0a0c02a616768bfab16c072788cb76be1893c37f
> > Fix save/restore for HVM domains with viridian=1
> >
> > xc_domain_save/restore currently pay no attention to
> > HVM_PARAM_VIRIDIAN which results in an HVM domain running a recent
> > version of Windows (post-Vista) locking up on a domain restore due to
> > EOIs (done via a viridian MSR write) being silently dropped.
> > This patch adds an extra save entry for the viridian parameter.
>
> Ok, I did check in the previous with the intention of fixing up the
> Xen bits. Is the best fix now agreed to just remove the Xen bits? :-
> ) I can check in a patch to do that if so.
>
> -- Keir
>
> > Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> >
> > diff -r 0a0c02a61676 -r 2a8ef49f60cf
> tools/libxc/xc_domain_restore.c
> > --- a/tools/libxc/xc_domain_restore.c Mon Nov 21 21:28:34 2011
> +0000
> > +++ b/tools/libxc/xc_domain_restore.c Fri Nov 25 15:30:40 2011
> +0000
> > @@ -675,6 +675,7 @@ typedef struct {
> > uint64_t vm86_tss;
> > uint64_t console_pfn;
> > uint64_t acpi_ioport_location;
> > + uint64_t viridian;
> > } pagebuf_t;
> >
> > static int pagebuf_init(pagebuf_t* buf) @@ -809,6 +810,16 @@
> static
> > int pagebuf_get_one(xc_interface
> > }
> > return pagebuf_get_one(xch, ctx, buf, fd, dom);
> >
> > + case XC_SAVE_ID_HVM_VIRIDIAN:
> > + /* Skip padding 4 bytes then read the viridian flag. */
> > + if ( RDEXACT(fd, &buf->viridian, sizeof(uint32_t)) ||
> > + RDEXACT(fd, &buf->viridian, sizeof(uint64_t)) )
> > + {
> > + PERROR("error read the viridian flag");
> > + return -1;
> > + }
> > + return pagebuf_get_one(xch, ctx, buf, fd, dom);
> > +
> > default:
> > if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
> > ERROR("Max batch size exceeded (%d). Giving up.",
> count);
> > @@ -1440,6 +1451,9 @@ int xc_domain_restore(xc_interface *xch,
> > fcntl(io_fd, F_SETFL, orig_io_fd_flags | O_NONBLOCK);
> > }
> >
> > + if (pagebuf.viridian != 0)
> > + xc_set_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN, 1);
> > +
> > if (pagebuf.acpi_ioport_location == 1) {
> > DBGPRINTF("Use new firmware ioport from the
> checkpoint\n");
> > xc_set_hvm_param(xch, dom,
> HVM_PARAM_ACPI_IOPORTS_LOCATION,
> > 1); diff -r 0a0c02a61676 -r 2a8ef49f60cf
> tools/libxc/xc_domain_save.c
> > --- a/tools/libxc/xc_domain_save.c Mon Nov 21 21:28:34 2011 +0000
> > +++ b/tools/libxc/xc_domain_save.c Fri Nov 25 15:30:40 2011 +0000
> > @@ -1506,6 +1506,18 @@ int xc_domain_save(xc_interface *xch, in
> > PERROR("Error when writing the firmware ioport
> version");
> > goto out;
> > }
> > +
> > + chunk.id = XC_SAVE_ID_HVM_VIRIDIAN;
> > + chunk.data = 0;
> > + xc_get_hvm_param(xch, dom, HVM_PARAM_VIRIDIAN,
> > + (unsigned long *)&chunk.data);
> > +
> > + if ( (chunk.data != 0) &&
> > + wrexact(io_fd, &chunk, sizeof(chunk)) )
> > + {
> > + PERROR("Error when writing the viridian flag");
> > + goto out;
> > + }
> > }
> >
> > if ( !callbacks->checkpoint )
> > diff -r 0a0c02a61676 -r 2a8ef49f60cf tools/libxc/xg_save_restore.h
> > --- a/tools/libxc/xg_save_restore.h Mon Nov 21 21:28:34 2011 +0000
> > +++ b/tools/libxc/xg_save_restore.h Fri Nov 25 15:30:40 2011 +0000
> > @@ -134,6 +134,7 @@
> > #define XC_SAVE_ID_HVM_CONSOLE_PFN -8 /* (HVM-only) */
> > #define XC_SAVE_ID_LAST_CHECKPOINT -9 /* Commit to restoring
> after
> > completion of current iteration. */
> > #define XC_SAVE_ID_HVM_ACPI_IOPORTS_LOCATION -10
> > +#define XC_SAVE_ID_HVM_VIRIDIAN -11
> >
> > /*
> > ** We process save/restore/migrate in batches of pages; the below
> >
> > _______________________________________________
> > Xen-devel mailing list
> > Xen-devel@lists.xensource.com
> > http://lists.xensource.com/xen-devel
>

