
Information required on shadow paging in Xen for Intel VT.
Hi Everyone,



I have just started studying the Xen source code for a project on how it takes advantage of Intel VT support for shadow paging.

I have listed my findings on the shadow paging source below (at the end of this mail).

Could you please send me information on how shadow paging is handled in Xen 3.0.3 in the context of Intel VT, and how to trace the relevant source code?

Sorry for writing to you at such an early stage of my learning of the Xen code, but your help would be of immense value to my understanding of Xen.



With Regards,

Aditya Shevalkar.

Email : aditya.shevalkar@nechclst.in



xen-3.0.3_0-src\xen\arch\x86\hvm\vmx\vmx.c



static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs)
{
    int result;

#if 0 /* keep for debugging */
    {
        unsigned long eip, cs;

        __vmread(GUEST_CS_BASE, &cs);
        __vmread(GUEST_RIP, &eip);
        HVM_DBG_LOG(DBG_LEVEL_VMMU,
                    "vmx_do_page_fault = 0x%lx, cs_base=%lx, "
                    "eip = %lx, error_code = %lx\n",
                    va, cs, eip, (unsigned long)regs->error_code);
    }
#endif

    result = shadow_fault(va, regs);

    TRACE_VMEXIT(2, result);
#if 0
    if ( !result )
    {
        __vmread(GUEST_RIP, &eip);
        printk("vmx pgfault to guest va=%lx eip=%lx\n", va, eip);
    }
#endif

    return result;
}
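From what I can tell so far, vmx_do_page_fault() simply hands the faulting guest virtual address to the common shadow code via shadow_fault(). My understanding (which I would like confirmed) is that shadow_fault() then dispatches through the vcpu's current shadow paging mode to sh_page_fault() in xen\arch\x86\mm\shadow\multi.c. Below is a minimal standalone sketch of that kind of per-mode dispatch, using my own placeholder names and structures, NOT the real Xen 3.0.3 definitions:

/* Illustration only: placeholder names, not the actual Xen structures.
 * It just models the per-mode dispatch that I believe shadow_fault()
 * performs on the way into multi.c. */
#include <stdio.h>

struct cpu_user_regs { unsigned long error_code; };

struct vcpu_stub;                     /* stand-in for Xen's struct vcpu */

struct shadow_mode_stub {
    /* mode-specific #PF handler (2-, 3- or 4-level guest, etc.) */
    int (*page_fault)(struct vcpu_stub *v, unsigned long va,
                      struct cpu_user_regs *regs);
};

struct vcpu_stub { const struct shadow_mode_stub *mode; };

static int fake_sh_page_fault(struct vcpu_stub *v, unsigned long va,
                              struct cpu_user_regs *regs)
{
    printf("shadow handler: va=%#lx ec=%#lx\n", va, regs->error_code);
    return 1;    /* nonzero = handled, as the callers above expect */
}

/* Generic entry point: forward to whichever mode this vcpu is using. */
static int fake_shadow_fault(struct vcpu_stub *v, unsigned long va,
                             struct cpu_user_regs *regs)
{
    return v->mode->page_fault(v, va, regs);
}

int main(void)
{
    const struct shadow_mode_stub mode = { .page_fault = fake_sh_page_fault };
    struct vcpu_stub v = { .mode = &mode };
    struct cpu_user_regs regs = { .error_code = 0x2 };
    return !fake_shadow_fault(&v, 0xdeadbeef, &regs);
}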



xen-3.0.3_0-src\xen\arch\x86\hvm\vmx\vmx.c



    case TRAP_page_fault:
    {
        __vmread(EXIT_QUALIFICATION, &va);
        __vmread(VM_EXIT_INTR_ERROR_CODE, &regs->error_code);

        TRACE_VMEXIT(3, regs->error_code);
        TRACE_VMEXIT(4, va);

        HVM_DBG_LOG(DBG_LEVEL_VMMU,
                    "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
                    (unsigned long)regs->eax, (unsigned long)regs->ebx,
                    (unsigned long)regs->ecx, (unsigned long)regs->edx,
                    (unsigned long)regs->esi, (unsigned long)regs->edi);

        if ( !vmx_do_page_fault(va, regs) )
        {
            /* Inject #PG using Interruption-Information Fields. */
            vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
            v->arch.hvm_vmx.cpu_cr2 = va;
            TRACE_3D(TRC_VMX_INT, v->domain->domain_id,
                     TRAP_page_fault, va);
        }
        break;
    }
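So, if I follow this correctly, the VM exit handler reads the faulting guest virtual address from EXIT_QUALIFICATION and the error code from VM_EXIT_INTR_ERROR_CODE, then calls vmx_do_page_fault(). If the shadow code returns 0 (it could not resolve the fault), the page fault is reflected back into the guest via vmx_inject_hw_exception() and the guest's virtual CR2 is set to the faulting address. Please correct me if this reading is wrong.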



xen-3.0.3_0-src\xen\include\asm-x86\hvm\hvm.h



static inline int
hvm_paging_enabled(struct vcpu *v)
{
    return hvm_funcs.paging_enabled(v);
}





xen-3.0.3_0-src\xen\include\asm-x86\hvm\vmx\vmx.h



static inline int vmx_paging_enabled(struct vcpu *v)
{
    unsigned long cr0;

    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
    return ((cr0 & (X86_CR0_PE|X86_CR0_PG)) == (X86_CR0_PE|X86_CR0_PG));
}
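If I understand vmx_paging_enabled() correctly, it looks at the CR0 read shadow, i.e. the CR0 value the guest believes it has written, and reports paging as enabled only when both PE and PG are set. A small standalone example of that bit test (my own code; only the bit positions are the standard architectural x86 values):

/* My own illustration of the CR0 test above. */
#include <stdio.h>

#define X86_CR0_PE 0x00000001UL   /* protected mode enable */
#define X86_CR0_PG 0x80000000UL   /* paging enable */

static int paging_enabled(unsigned long cr0)
{
    return (cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
}

int main(void)
{
    printf("%d\n", paging_enabled(X86_CR0_PE | X86_CR0_PG)); /* 1: both set */
    printf("%d\n", paging_enabled(X86_CR0_PE));              /* 0: PG clear */
    return 0;
}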



xen-3.0.3_0-src\xen\arch\x86\mm\shadow\multi.c



/**************************************************************************/
/* Handling HVM guest writes to pagetables */

/* Check that the user is allowed to perform this write.
 * Returns a mapped pointer to write to, and the mfn it's on,
 * or NULL for error. */
static inline void * emulate_map_dest(struct vcpu *v,
                                      unsigned long vaddr,
                                      struct x86_emulate_ctxt *ctxt,
                                      mfn_t *mfnp)
{
    walk_t gw;
    u32 flags;
    gfn_t gfn;
    mfn_t mfn;

    guest_walk_tables(v, vaddr, &gw, 1);
    flags = accumulate_guest_flags(v, &gw);
    gfn = guest_l1e_get_gfn(gw.eff_l1e);
    mfn = vcpu_gfn_to_mfn(v, gfn);
    sh_audit_gw(v, &gw);
    unmap_walk(v, &gw);

    if ( !(flags & _PAGE_PRESENT)
         || !(flags & _PAGE_RW)
         || (!(flags & _PAGE_USER) && ring_3(ctxt->regs)) )
    {
        /* This write would have faulted even on bare metal */
        v->arch.shadow.propagate_fault = 1;
        return NULL;
    }

    if ( !valid_mfn(mfn) )
    {
        /* Attempted a write to a bad gfn. This should never happen:
         * after all, we're here because this write is to a page table. */
        BUG();
    }

    ASSERT(sh_mfn_is_a_page_table(mfn));
    *mfnp = mfn;
    return sh_map_domain_page(mfn) + (vaddr & ~PAGE_MASK);
}
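My reading of emulate_map_dest() is that, when a guest write to one of its own page tables is trapped and emulated, Xen first walks the guest's page tables for the target address (guest_walk_tables), translates the gfn to an mfn, and then applies the same present/writable/user permission checks that real hardware would have applied; if the write would have faulted on bare metal, propagate_fault is set and NULL is returned, otherwise the destination frame is mapped so the emulated write can be performed. Here is a tiny standalone version of just the permission predicate (my own code; only the low PTE flag bits are the standard x86 values):

/* Illustration of the permission check in emulate_map_dest(); the flag
 * values are the standard x86 page-table entry bits. */
#include <stdio.h>
#include <stdbool.h>

#define _PAGE_PRESENT 0x001U
#define _PAGE_RW      0x002U
#define _PAGE_USER    0x004U

/* Would a write to a page with these accumulated flags have faulted on
 * bare metal?  'user_mode' plays the role of ring_3(ctxt->regs). */
static bool write_would_fault(unsigned int flags, bool user_mode)
{
    return !(flags & _PAGE_PRESENT)
        || !(flags & _PAGE_RW)
        || (!(flags & _PAGE_USER) && user_mode);
}

int main(void)
{
    printf("%d\n", write_would_fault(_PAGE_PRESENT | _PAGE_RW, true));  /* 1: USER clear */
    printf("%d\n", write_would_fault(_PAGE_PRESENT | _PAGE_RW, false)); /* 0: kernel write ok */
    return 0;
}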