
[xen-unstable] svm: Replace call to svm_load_cr2 with conditional call to svm_trace_vmentry
# HG changeset patch
# User kfraser@localhost.localdomain
# Date 1180011418 -3600
# Node ID fe0499f6235c964d385cdcb6b9127f5601186cea
# Parent 8fcefab1d63b040111786ef9653ecb5332ac1171
svm: Replace call to svm_load_cr2 with conditional call to svm_trace_vmentry

Remove the call to svm_load_cr2 (which does nothing useful) and replace
it with a new function, svm_trace_vmentry. The new function is called
only when "tb_init_done" is non-zero, so no call is made unless the
trace buffer is actually in use.
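
In C terms, the new vmentry tail behaves as in the sketch below. This
is illustrative only: the flag test is open-coded in assembly so that
the common, tracing-off path costs a single byte compare and a
not-taken branch, and the wrapper function here is an assumption, not
code from the tree.

    int tb_init_done;                   /* non-zero once tracing is enabled */
    void svm_trace_vmentry(void);       /* emits the VMENTRY trace record */

    static void vmentry_tail(void)      /* hypothetical wrapper, for illustration */
    {
        if ( tb_init_done )             /* rarely true; handled out of line in asm */
            svm_trace_vmentry();        /* last C code before VMRUN */
        /* ... reload guest registers and execute VMRUN ... */
    }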

Signed-off-by: Mats Petersson <mats.petersson@amd.com>
---
 xen/arch/x86/hvm/svm/svm.c          |    4 +---
 xen/arch/x86/hvm/svm/x86_32/exits.S |   21 ++++++++++++++++++---
 xen/arch/x86/hvm/svm/x86_64/exits.S |   21 ++++++++++++++++++---
 3 files changed, 37 insertions(+), 9 deletions(-)

diff -r 8fcefab1d63b -r fe0499f6235c xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c Thu May 24 13:45:47 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c Thu May 24 13:56:58 2007 +0100
@@ -2520,14 +2520,12 @@ asmlinkage void svm_vmexit_handler(struc
     }
 }
 
-asmlinkage void svm_load_cr2(void)
+asmlinkage void svm_trace_vmentry(void)
 {
     struct vcpu *v = current;
 
     /* This is the last C code before the VMRUN instruction. */
     HVMTRACE_0D(VMENTRY, v);
-
-    asm volatile ( "mov %0,%%cr2" : : "r" (v->arch.hvm_svm.cpu_cr2) );
 }
 
 /*
diff -r 8fcefab1d63b -r fe0499f6235c xen/arch/x86/hvm/svm/x86_32/exits.S
--- a/xen/arch/x86/hvm/svm/x86_32/exits.S Thu May 24 13:45:47 2007 +0100
+++ b/xen/arch/x86/hvm/svm/x86_32/exits.S Thu May 24 13:56:58 2007 +0100
@@ -38,15 +38,22 @@ ENTRY(svm_asm_do_resume)
 ENTRY(svm_asm_do_resume)
         GET_CURRENT(%ebx)
         CLGI
-        /* Run ASID stuff. */
-        call svm_asid_handle_vmrun
 
         movl VCPU_processor(%ebx),%eax
         shl $IRQSTAT_shift,%eax
         testl $~0,irq_stat(%eax,1)
         jnz svm_process_softirqs
+
+        call svm_asid_handle_vmrun
         call svm_intr_assist
-        call svm_load_cr2
+
+        /* Check whether the trace buffer is initialized.
+         * Because the condition below is unlikely, we jump out of line
+         * instead of having a mostly-taken branch over the unlikely code.
+         */
+        cmpb $0,tb_init_done
+        jnz svm_trace
+svm_trace_done:
 
         movl VCPU_svm_vmcb(%ebx),%ecx
         movl UREGS_eax(%esp),%eax
@@ -89,3 +96,11 @@ svm_process_softirqs:
         STGI
         call do_softirq
         jmp svm_asm_do_resume
+
+svm_trace:
+        /* Call out to C, as this is not a speed-critical path.
+         * Note: svm_trace_vmentry will recheck tb_init_done, but this
+         * is the slow path, so the redundant check is harmless.
+         */
+        call svm_trace_vmentry
+        jmp svm_trace_done
diff -r 8fcefab1d63b -r fe0499f6235c xen/arch/x86/hvm/svm/x86_64/exits.S
--- a/xen/arch/x86/hvm/svm/x86_64/exits.S Thu May 24 13:45:47 2007 +0100
+++ b/xen/arch/x86/hvm/svm/x86_64/exits.S Thu May 24 13:56:58 2007 +0100
@@ -38,16 +38,23 @@ ENTRY(svm_asm_do_resume)
 ENTRY(svm_asm_do_resume)
         GET_CURRENT(%rbx)
         CLGI
-        /* Run ASID stuff. */
-        call svm_asid_handle_vmrun
 
         movl VCPU_processor(%rbx),%eax
         shl $IRQSTAT_shift,%rax
         leaq irq_stat(%rip),%rdx
         testl $~0,(%rdx,%rax,1)
         jnz svm_process_softirqs
+
+        call svm_asid_handle_vmrun
         call svm_intr_assist
-        call svm_load_cr2
+
+        /* Check whether the trace buffer is initialized.
+         * Because the condition below is unlikely, we jump out of line
+         * instead of having a mostly-taken branch over the unlikely code.
+         */
+        cmpb $0,tb_init_done(%rip)
+        jnz svm_trace
+svm_trace_done:
 
         movq VCPU_svm_vmcb(%rbx),%rcx
         movq UREGS_rax(%rsp),%rax
@@ -106,3 +113,11 @@ svm_process_softirqs:
         STGI
         call do_softirq
         jmp svm_asm_do_resume
+
+svm_trace:
+        /* Call out to C, as this is not a speed-critical path.
+         * Note: svm_trace_vmentry will recheck tb_init_done, but this
+         * is the slow path, so the redundant check is harmless.
+         */
+        call svm_trace_vmentry
+        jmp svm_trace_done
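
The out-of-line placement of the svm_trace label in both exits.S files
is the hand-written equivalent of the unlikely() hint used in Xen's C
code. A minimal sketch of the same idea follows; the function names are
hypothetical, for illustration only.

    #define unlikely(x) __builtin_expect(!!(x), 0)  /* as in Xen/Linux */

    int tb_init_done;
    void svm_trace_vmentry(void);

    void vmentry_path(void)             /* hypothetical, for illustration */
    {
        if ( unlikely(tb_init_done) )   /* hint: branch is rarely taken */
            svm_trace_vmentry();        /* compiler may place this out of line */
        /* hot path continues straight-line when tracing is off */
    }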
