
[PATCH 06/11] nEPT: Try to enable EPT paging for L2 guest.
From: Zhang Xiantao <xiantao.zhang@intel.com>

Once EPT is found to be enabled by the L1 VMM, enable nested EPT support
for the L2 guest.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
xen/arch/x86/hvm/vmx/vmx.c | 16 +++++++++--
xen/arch/x86/hvm/vmx/vvmx.c | 50 ++++++++++++++++++++++++++++--------
xen/include/asm-x86/hvm/vmx/vvmx.h | 5 +++-
3 files changed, 56 insertions(+), 15 deletions(-)
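
The vmx.c hunk below carries the behavioural change on the exit path:
hvm_hap_nested_page_fault() is no longer treated as a plain boolean. A return
of 0 falls through to the existing error path, 1 means the fault was handled
by the host, and -1 means the violation belongs to the L1 guest's own EPT
tables, so it is reflected back to L1 by setting nv_vmexit_pending. The
stand-alone sketch below models only that dispatch; toy_nested_page_fault(),
toy_handle_violation() and the FAULT_* names are invented for illustration
and are not part of the patch.

/* Toy model of the tri-state dispatch added to ept_handle_violation().
 * Self-contained and simplified: toy_nested_page_fault() stands in for
 * hvm_hap_nested_page_fault(), and the FAULT_* labels are made up for
 * this example only. */
#include <stdio.h>

enum { FAULT_ERROR = 0, FAULT_HANDLED = 1, FAULT_INJECT_L1 = -1 };

/* Pretend faults below 0x1000 are handled by the host p2m and the
 * rest hit the L1 guest's nested EPT tables. */
static int toy_nested_page_fault(unsigned long gpa)
{
    return gpa < 0x1000 ? FAULT_HANDLED : FAULT_INJECT_L1;
}

static void toy_handle_violation(unsigned long gpa)
{
    switch ( toy_nested_page_fault(gpa) )
    {
    case FAULT_ERROR:
        printf("gpa %#lx: unhandled, fall through to the error path\n", gpa);
        break;
    case FAULT_HANDLED:
        printf("gpa %#lx: handled in the host p2m, resume L2\n", gpa);
        return;
    case FAULT_INJECT_L1:
        /* The real code sets nv_vmexit_pending so L1 sees an EPT violation. */
        printf("gpa %#lx: reflect the EPT violation to L1\n", gpa);
        return;
    }
}

int main(void)
{
    toy_handle_violation(0x0800);
    toy_handle_violation(0x2000);
    return 0;
}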

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 06455bf..1bfb67f 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1513,6 +1513,7 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
.nhvm_vcpu_guestcr3 = nvmx_vcpu_guestcr3,
.nhvm_vcpu_p2m_base = nvmx_vcpu_eptp_base,
.nhvm_vcpu_asid = nvmx_vcpu_asid,
+ .nhvm_vmcx_hap_enabled = nvmx_ept_enabled,
.nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
.nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
.nhvm_intr_blocked = nvmx_intr_blocked,
@@ -2055,6 +2056,7 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
unsigned long gla, gfn = gpa >> PAGE_SHIFT;
mfn_t mfn;
p2m_type_t p2mt;
+ int ret;
struct domain *d = current->domain;

if ( tb_init_done )
@@ -2073,14 +2075,22 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
__trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
}

- if ( hvm_hap_nested_page_fault(gpa,
+ ret = hvm_hap_nested_page_fault(gpa,
qualification & EPT_GLA_VALID ? 1 : 0,
qualification & EPT_GLA_VALID
? __vmread(GUEST_LINEAR_ADDRESS) : ~0ull,
qualification & EPT_READ_VIOLATION ? 1 : 0,
qualification & EPT_WRITE_VIOLATION ? 1 : 0,
- qualification & EPT_EXEC_VIOLATION ? 1 : 0) )
- return;
+ qualification & EPT_EXEC_VIOLATION ? 1 : 0);
+ switch (ret) {
+ case 0:
+ break;
+ case 1:
+ return;
+ case -1:
+ vcpu_nestedhvm(current).nv_vmexit_pending = 1;
+ return;
+ }

/* Everything else is an error. */
mfn = get_gfn_query_unlocked(d, gfn, &p2mt);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 76cf757..ab68b52 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -41,6 +41,7 @@ int nvmx_vcpu_initialise(struct vcpu *v)
gdprintk(XENLOG_ERR, "nest: allocation for shadow vmcs failed\n");
goto out;
}
+ nvmx->ept.enabled = 0;
nvmx->vmxon_region_pa = 0;
nvcpu->nv_vvmcx = NULL;
nvcpu->nv_vvmcxaddr = VMCX_EADDR;
@@ -96,9 +97,11 @@ uint64_t nvmx_vcpu_guestcr3(struct vcpu *v)

uint64_t nvmx_vcpu_eptp_base(struct vcpu *v)
{
- /* TODO */
- ASSERT(0);
- return 0;
+ uint64_t eptp_base;
+ struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+
+ eptp_base = __get_vvmcs(nvcpu->nv_vvmcx, EPT_POINTER);
+ return eptp_base & PAGE_MASK;
}

uint32_t nvmx_vcpu_asid(struct vcpu *v)
@@ -108,6 +111,13 @@ uint32_t nvmx_vcpu_asid(struct vcpu *v)
return 0;
}

+bool_t nvmx_ept_enabled(struct vcpu *v)
+{
+ struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+
+ return !!(nvmx->ept.enabled);
+}
+
static const enum x86_segment sreg_to_index[] = {
[VMX_SREG_ES] = x86_seg_es,
[VMX_SREG_CS] = x86_seg_cs,
@@ -503,14 +513,16 @@ void nvmx_update_exec_control(struct vcpu *v, u32 host_cntrl)
}

void nvmx_update_secondary_exec_control(struct vcpu *v,
- unsigned long value)
+ unsigned long host_cntrl)
{
u32 shadow_cntrl;
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
+ struct nestedvmx *nvmx = &vcpu_2_nvmx(v);

shadow_cntrl = __get_vvmcs(nvcpu->nv_vvmcx, SECONDARY_VM_EXEC_CONTROL);
- shadow_cntrl |= value;
- set_shadow_control(v, SECONDARY_VM_EXEC_CONTROL, shadow_cntrl);
+ nvmx->ept.enabled = !!(shadow_cntrl & SECONDARY_EXEC_ENABLE_EPT);
+ shadow_cntrl |= host_cntrl;
+ __vmwrite(SECONDARY_VM_EXEC_CONTROL, shadow_cntrl);
}

static void nvmx_update_pin_control(struct vcpu *v, unsigned long host_cntrl)
@@ -818,6 +830,19 @@ static void load_shadow_guest_state(struct vcpu *v)
/* TODO: CR3 target control */
}

+
+static uint64_t get_shadow_eptp(struct vcpu *v)
+{
+ uint64_t eptp_asr;
+ uint64_t np2m_base = nvmx_vcpu_eptp_base(v);
+ struct p2m_domain *p2m = p2m_get_nestedp2m(v, np2m_base);
+ struct ept_data *ept_data = p2m->hap_data;
+
+ eptp_asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
+ ept_data->ept_ctl.asr = eptp_asr;
+ return ept_data->ept_ctl.eptp;
+}
+
static void virtual_vmentry(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
@@ -862,7 +887,10 @@ static void virtual_vmentry(struct cpu_user_regs *regs)
/* updating host cr0 to sync TS bit */
__vmwrite(HOST_CR0, v->arch.hvm_vmx.host_cr0);

- /* TODO: EPT_POINTER */
+ /* Set up virtual EPT for L2 guest */
+ if ( nestedhvm_paging_mode_hap(v) )
+ __vmwrite(EPT_POINTER, get_shadow_eptp(v));
+
}

static void sync_vvmcs_guest_state(struct vcpu *v, struct cpu_user_regs *regs)
@@ -915,8 +943,8 @@ static void sync_vvmcs_ro(struct vcpu *v)
/* Adjust exit_reason/exit_qualification for violation case */
if ( __get_vvmcs(vvmcs, VM_EXIT_REASON) ==
EXIT_REASON_EPT_VIOLATION ) {
- __set_vvmcs(vvmcs, EXIT_QUALIFICATION, nvmx->ept_exit.exit_qual);
- __set_vvmcs(vvmcs, VM_EXIT_REASON, nvmx->ept_exit.exit_reason);
+ __set_vvmcs(vvmcs, EXIT_QUALIFICATION, nvmx->ept.exit_qual);
+ __set_vvmcs(vvmcs, VM_EXIT_REASON, nvmx->ept.exit_reason);
}
}

@@ -1480,8 +1508,8 @@ nvmx_hap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
case EPT_TRANSLATE_VIOLATION:
case EPT_TRANSLATE_MISCONFIG:
rc = NESTEDHVM_PAGEFAULT_INJECT;
- nvmx->ept_exit.exit_reason = exit_reason;
- nvmx->ept_exit.exit_qual = exit_qual;
+ nvmx->ept.exit_reason = exit_reason;
+ nvmx->ept.exit_qual = exit_qual;
break;
case EPT_TRANSLATE_RETRY:
rc = NESTEDHVM_PAGEFAULT_RETRY;
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 8eb377b..661cd8a 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -33,9 +33,10 @@ struct nestedvmx {
u32 error_code;
} intr;
struct {
+ char enabled;
uint32_t exit_reason;
uint32_t exit_qual;
- } ept_exit;
+ } ept;
};

#define vcpu_2_nvmx(v) (vcpu_nestedhvm(v).u.nvmx)
@@ -110,6 +111,8 @@ int nvmx_intercepts_exception(struct vcpu *v,
unsigned int trap, int error_code);
void nvmx_domain_relinquish_resources(struct domain *d);

+bool_t nvmx_ept_enabled(struct vcpu *v);
+
int nvmx_handle_vmxon(struct cpu_user_regs *regs);
int nvmx_handle_vmxoff(struct cpu_user_regs *regs);

--
1.7.1
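
Taken together, the pieces work as follows: nvmx_update_secondary_exec_control()
latches whether the L1 guest set the "enable EPT" secondary control into
nvmx->ept.enabled, nvmx_ept_enabled() reports that through the new
nhvm_vmcx_hap_enabled hook, and virtual_vmentry() writes the shadow EPTP built
by get_shadow_eptp() into EPT_POINTER only when nested HAP is in use. The
small stand-alone model below illustrates that flow; every toy_* name and the
simplified structure are invented for the example and do not appear in the
patch.

/* Toy model of the enable/vmentry flow introduced by this patch.
 * Nothing here is Xen code: toy_nvmx mirrors only the new
 * nvmx->ept.enabled flag, and the control bit value is a stand-in. */
#include <stdint.h>
#include <stdio.h>

#define TOY_SECONDARY_EXEC_ENABLE_EPT 0x2u  /* stand-in for the "enable EPT" bit */

struct toy_nvmx {
    int ept_enabled;                        /* mirrors nvmx->ept.enabled */
};

/* Mirrors the latch in nvmx_update_secondary_exec_control(): record
 * whether L1 asked for EPT before the host controls are merged in. */
static void toy_update_secondary_ctls(struct toy_nvmx *nvmx,
                                      uint32_t l1_secondary_ctls)
{
    nvmx->ept_enabled = !!(l1_secondary_ctls & TOY_SECONDARY_EXEC_ENABLE_EPT);
}

/* Mirrors the virtual_vmentry() hunk: only point the hardware at a
 * shadow EPTP when L1 actually enabled EPT for its L2 guest. */
static void toy_virtual_vmentry(const struct toy_nvmx *nvmx,
                                uint64_t shadow_eptp)
{
    if ( nvmx->ept_enabled )
        printf("vmwrite EPT_POINTER = %#llx\n",
               (unsigned long long)shadow_eptp);
    else
        printf("L1 runs L2 without EPT; EPT_POINTER is left alone\n");
}

int main(void)
{
    struct toy_nvmx nvmx;

    toy_update_secondary_ctls(&nvmx, TOY_SECONDARY_EXEC_ENABLE_EPT);
    toy_virtual_vmentry(&nvmx, 0x1234000ull | 0x1eull); /* base | low EPTP bits */

    toy_update_secondary_ctls(&nvmx, 0);
    toy_virtual_vmentry(&nvmx, 0);
    return 0;
}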


Re: [PATCH 06/11] nEPT: Try to enable EPT paging for L2 guest.
At 01:57 +0800 on 11 Dec (1355191038), xiantao.zhang@intel.com wrote:
> From: Zhang Xiantao <xiantao.zhang@intel.com>
>
> Once EPT is found to be enabled by the L1 VMM, enable nested EPT support
> for the L2 guest.
>
> Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>

Acked-by: Tim Deegan <tim@xen.org>
(though strictly speaking this isn't x86/mm code)
