[xen-unstable] hvm: Cleanups to state-restore paths (including vmxassist world restore).
# HG changeset patch
# User kfraser@localhost.localdomain
# Date 1187025502 -3600
# Node ID 08e962b8597cdc7c1dbef7c8228f4ceef1d5737e
# Parent d8b5b02c52cf997eaa9be4717dd2da1a8a1cb4d2
hvm: Cleanups to state-restore paths (including vmxassist world restore).
Signed-off-by: Keir Fraser <keir@xensource.com>
---
xen/arch/x86/hvm/hvm.c     |    2
xen/arch/x86/hvm/svm/svm.c |   14 ++----
xen/arch/x86/hvm/vmx/vmx.c |  105 ++++++++++++++-------------------------------
3 files changed, 41 insertions(+), 80 deletions(-)
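
Both restore paths below converge on the same pattern: cache the guest-visible
control registers in v->arch.hvm_vcpu.guest_cr[] first, then have the
per-vendor update hook push them into the VMCB/VMCS, leaving the CR3
page-table reference counting and the bad_cr3 error exit as the only special
cases. A minimal sketch of that pattern, taken from the hunks below (VMX
naming; the SVM path is identical but with svm_update_guest_cr()):

    v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;  /* ET is always 1 */
    v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
    v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
    vmx_update_guest_cr(v, 0);   /* propagate cached values into the VMCS */
    vmx_update_guest_cr(v, 2);
    vmx_update_guest_cr(v, 4);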

diff -r d8b5b02c52cf -r 08e962b8597c xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Mon Aug 13 16:47:11 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c Mon Aug 13 18:18:22 2007 +0100
@@ -587,7 +587,7 @@ int hvm_set_cr0(unsigned long value)
hvm_update_guest_efer(v);
}

- if ( !paging_mode_hap(v->domain) && v->arch.hvm_vcpu.guest_cr[3] )
+ if ( !paging_mode_hap(v->domain) )
{
put_page(mfn_to_page(get_mfn_from_gpfn(
v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
diff -r d8b5b02c52cf -r 08e962b8597c xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Mon Aug 13 16:47:11 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Mon Aug 13 18:18:22 2007 +0100
@@ -345,14 +345,13 @@ int svm_vmcb_restore(struct vcpu *v, str
vmcb->rflags = c->rflags;

v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
+ v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
+ v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
+ v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
svm_update_guest_cr(v, 0);
-
- v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
svm_update_guest_cr(v, 2);
-
- v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
svm_update_guest_cr(v, 4);
-
+
#ifdef HVM_DEBUG_SUSPEND
printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
__func__, c->cr3, c->cr0, c->cr4);
@@ -360,15 +359,14 @@ int svm_vmcb_restore(struct vcpu *v, str

if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
{
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64, c->cr3);
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %"PRIx64, c->cr3);
mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
goto bad_cr3;
old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
v->arch.guest_table = pagetable_from_pfn(mfn);
- if (old_base_mfn)
+ if ( old_base_mfn )
put_page(mfn_to_page(old_base_mfn));
- v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
}

vmcb->idtr.limit = c->idtr_limit;
diff -r d8b5b02c52cf -r 08e962b8597c xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c Mon Aug 13 16:47:11 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c Mon Aug 13 18:18:22 2007 +0100
@@ -61,6 +61,7 @@ static int vmx_alloc_vlapic_mapping(str
static int vmx_alloc_vlapic_mapping(struct domain *d);
static void vmx_free_vlapic_mapping(struct domain *d);
static void vmx_install_vlapic_mapping(struct vcpu *v);
+static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
static void vmx_update_guest_efer(struct vcpu *v);

static int vmx_domain_initialise(struct domain *d)
@@ -572,50 +573,33 @@ int vmx_vmcs_restore(struct vcpu *v, str
__vmwrite(GUEST_RSP, c->rsp);
__vmwrite(GUEST_RFLAGS, c->rflags);

- v->arch.hvm_vcpu.hw_cr[0] = (c->cr0 | X86_CR0_PE | X86_CR0_PG |
- X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
- __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
- v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
- __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
-
+ v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
-
- v->arch.hvm_vcpu.guest_efer = c->msr_efer;
+ v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
+ v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+ vmx_update_guest_cr(v, 0);
+ vmx_update_guest_cr(v, 2);
+ vmx_update_guest_cr(v, 4);

#ifdef HVM_DEBUG_SUSPEND
printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
__func__, c->cr3, c->cr0, c->cr4);
#endif

- if ( !hvm_paging_enabled(v) )
- {
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "%s: paging not enabled.", __func__);
- goto skip_cr3;
- }
-
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %"PRIx64, c->cr3);
- /* current!=vcpu as not called by arch_vmx_do_launch */
- mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
- if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
- {
- gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64".\n", c->cr3);
- vmx_vmcs_exit(v);
- return -EINVAL;
- }
-
- old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
- v->arch.guest_table = pagetable_from_pfn(mfn);
- if ( old_base_mfn )
- put_page(mfn_to_page(old_base_mfn));
-
- skip_cr3:
- v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
-
+ if ( hvm_paging_enabled(v) )
+ {
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %"PRIx64, c->cr3);
+ mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
+ if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
+ goto bad_cr3;
+ old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
+ v->arch.guest_table = pagetable_from_pfn(mfn);
+ if ( old_base_mfn )
+ put_page(mfn_to_page(old_base_mfn));
+ }
+
+ v->arch.hvm_vcpu.guest_efer = c->msr_efer;
vmx_update_guest_efer(v);
-
- __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
- v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
- __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);

__vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
__vmwrite(GUEST_IDTR_BASE, c->idtr_base);
@@ -696,6 +680,11 @@ int vmx_vmcs_restore(struct vcpu *v, str
}

return 0;
+
+ bad_cr3:
+ gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n", c->cr3);
+ vmx_vmcs_exit(v);
+ return -EINVAL;
}

#if defined(__x86_64__) && defined(HVM_DEBUG_SUSPEND)
@@ -1923,29 +1912,14 @@ static int vmx_world_restore(struct vcpu
__vmwrite(GUEST_RFLAGS, c->eflags);

v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
- __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
-
- if ( !hvm_paging_enabled(v) )
- goto skip_cr3;
-
- if ( c->cr3 == v->arch.hvm_vcpu.guest_cr[3] )
- {
- /*
- * This is simple TLB flush, implying the guest has
- * removed some translation or changed page attributes.
- * We simply invalidate the shadow.
- */
- mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
- if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
- goto bad_cr3;
- }
- else
- {
- /*
- * If different, make a shadow. Check if the PDBR is valid
- * first.
- */
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
+ v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
+ v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
+ vmx_update_guest_cr(v, 0);
+ vmx_update_guest_cr(v, 4);
+
+ if ( hvm_paging_enabled(v) )
+ {
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %x", c->cr3);
mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
goto bad_cr3;
@@ -1953,18 +1927,7 @@ static int vmx_world_restore(struct vcpu
v->arch.guest_table = pagetable_from_pfn(mfn);
if ( old_base_mfn )
put_page(mfn_to_page(old_base_mfn));
- v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
- }
-
- skip_cr3:
- if ( !hvm_paging_enabled(v) )
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
- else
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
-
- __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
- v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
- __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
+ }

__vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
__vmwrite(GUEST_IDTR_BASE, c->idtr_base);
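
The open-coded GUEST_CR0/GUEST_CR4 and read-shadow writes deleted above are
now centralised in vmx_update_guest_cr(). Judging only from the lines this
patch removes from vmx_vmcs_restore() and vmx_world_restore(), the hook has
to do roughly the following for CR0 and CR4 (a sketch inferred from those
deleted lines, not the helper's verbatim body):

    /* CR0: hardware always runs with PE/PG/NE/WP/ET forced on ... */
    v->arch.hvm_vcpu.hw_cr[0] = v->arch.hvm_vcpu.guest_cr[0] |
        X86_CR0_PE | X86_CR0_PG | X86_CR0_NE | X86_CR0_WP | X86_CR0_ET;
    __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
    /* ... while the guest reads back its own unmodified value via the shadow. */
    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);

    /* CR4: likewise, the host-required bits are OR'ed into the active value. */
    __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK);
    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);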
