Mailing List Archive

[PATCH] add canonical address checks to HVM
Add proper long mode canonical address checks to PIO emulation and MSR
writes, the former paralleling the limit checks added for 32-bit guests.
Also catches two more cases in the MSR handling code where only ECX
(rather than RCX) should be used.

Signed-off-by: Jan Beulich <jbeulich@novell.com>

Index: 2006-11-27/xen/arch/x86/hvm/svm/emulate.c
===================================================================
--- 2006-11-27.orig/xen/arch/x86/hvm/svm/emulate.c 2006-11-27 17:12:49.000000000 +0100
+++ 2006-11-27/xen/arch/x86/hvm/svm/emulate.c 2006-11-29 15:32:10.000000000 +0100
@@ -128,17 +128,6 @@ static inline unsigned long DECODE_GPR_V
return (unsigned long) -1; \
}

-#if 0
-/*
- * hv_is_canonical - checks if the given address is canonical
- */
-static inline u64 hv_is_canonical(u64 addr)
-{
- u64 bits = addr & (u64)0xffff800000000000;
- return (u64)((bits == (u64)0xffff800000000000) || (bits == (u64)0x0));
-}
-#endif
-
#define modrm operand [0]

#define sib operand [1]
Index: 2006-11-27/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- 2006-11-27.orig/xen/arch/x86/hvm/svm/svm.c 2006-11-28 11:01:57.000000000 +0100
+++ 2006-11-27/xen/arch/x86/hvm/svm/svm.c 2006-11-29 15:55:03.000000000 +0100
@@ -269,13 +269,11 @@ static int svm_long_mode_enabled(struct
return test_bit(SVM_CPU_STATE_LMA_ENABLED, &v->arch.hvm_svm.cpu_state);
}

-#define IS_CANO_ADDRESS(add) 1
-
static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
{
u64 msr_content = 0;
- struct vcpu *vc = current;
- struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
+ struct vcpu *v = current;
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

switch ((u32)regs->ecx)
{
@@ -284,17 +282,37 @@ static inline int long_mode_do_msr_read(
msr_content &= ~EFER_SVME;
break;

+#ifdef __x86_64__
case MSR_FS_BASE:
+ if ( !svm_long_mode_enabled(v) )
+ {
+ svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+ return 0;
+ }
+
msr_content = vmcb->fs.base;
break;

case MSR_GS_BASE:
+ if ( !svm_long_mode_enabled(v) )
+ {
+ svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+ return 0;
+ }
+
msr_content = vmcb->gs.base;
break;

case MSR_SHADOW_GS_BASE:
+ if ( !svm_long_mode_enabled(v) )
+ {
+ svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+ return 0;
+ }
+
msr_content = vmcb->kerngsbase;
break;
+#endif

case MSR_STAR:
msr_content = vmcb->star;
@@ -326,13 +344,14 @@ static inline int long_mode_do_msr_read(
static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
{
u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+ u32 ecx = regs->ecx;
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

HVM_DBG_LOG(DBG_LEVEL_1, "msr %x msr_content %"PRIx64"\n",
- (u32)regs->ecx, msr_content);
+ ecx, msr_content);

- switch ( (u32)regs->ecx )
+ switch ( ecx )
{
case MSR_EFER:
#ifdef __x86_64__
@@ -371,37 +390,49 @@ static inline int long_mode_do_msr_write
vmcb->efer = msr_content | EFER_SVME;
break;

+#ifdef __x86_64__
case MSR_FS_BASE:
case MSR_GS_BASE:
+ case MSR_SHADOW_GS_BASE:
if ( !svm_long_mode_enabled(v) )
- goto exit_and_crash;
+ {
+ svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+ return 0;
+ }

- if (!IS_CANO_ADDRESS(msr_content))
+ if ( !IS_CANO_ADDRESS(msr_content) )
{
- HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
+ HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of base msr write\n");
svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+ return 0;
}

- if (regs->ecx == MSR_FS_BASE)
+ if ( ecx == MSR_FS_BASE )
vmcb->fs.base = msr_content;
- else
+ else if ( ecx == MSR_GS_BASE )
vmcb->gs.base = msr_content;
+ else
+ vmcb->kerngsbase = msr_content;
break;
-
- case MSR_SHADOW_GS_BASE:
- vmcb->kerngsbase = msr_content;
- break;
+#endif

case MSR_STAR:
vmcb->star = msr_content;
break;

case MSR_LSTAR:
- vmcb->lstar = msr_content;
- break;
-
case MSR_CSTAR:
- vmcb->cstar = msr_content;
+ if (!IS_CANO_ADDRESS(msr_content))
+ {
+ HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of star msr write\n");
+ svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+ return 0;
+ }
+
+ if ( ecx == MSR_LSTAR )
+ vmcb->lstar = msr_content;
+ else
+ vmcb->cstar = msr_content;
break;

case MSR_SYSCALL_MASK:
@@ -413,11 +444,6 @@ static inline int long_mode_do_msr_write
}

return 1;
-
- exit_and_crash:
- gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
- domain_crash(v->domain);
- return 1; /* handled */
}


@@ -1355,8 +1381,34 @@ static inline int svm_get_io_address(

*addr += seg->base;
}
- else if (seg == &vmcb->fs || seg == &vmcb->gs)
- *addr += seg->base;
+#ifdef __x86_64__
+ else
+ {
+ if (seg == &vmcb->fs || seg == &vmcb->gs)
+ *addr += seg->base;
+
+ if (!IS_CANO_ADDRESS(*addr) || !IS_CANO_ADDRESS(*addr + size - 1))
+ {
+ svm_inject_exception(v, TRAP_gp_fault, 1, 0);
+ return 0;
+ }
+ if (*count > (1UL << 48) / size)
+ *count = (1UL << 48) / size;
+ if (!(regs->eflags & EF_DF))
+ {
+ if (*addr + *count * size - 1 < *addr ||
+ !IS_CANO_ADDRESS(*addr + *count * size - 1))
+ *count = (*addr & ~((1UL << 48) - 1)) / size;
+ }
+ else
+ {
+ if ((*count - 1) * size > *addr ||
+ !IS_CANO_ADDRESS(*addr + (*count - 1) * size))
+ *count = (*addr & ~((1UL << 48) - 1)) / size + 1;
+ }
+ ASSERT(*count);
+ }
+#endif

return 1;
}
Index: 2006-11-27/xen/arch/x86/hvm/vmx/vmx.c
===================================================================
--- 2006-11-27.orig/xen/arch/x86/hvm/vmx/vmx.c 2006-11-28 11:14:04.000000000 +0100
+++ 2006-11-27/xen/arch/x86/hvm/vmx/vmx.c 2006-11-29 15:44:30.000000000 +0100
@@ -100,8 +100,14 @@ static void vmx_save_host_msrs(void)
msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_ ## address]; \
break

-#define CASE_WRITE_MSR(address) \
+#define CASE_WRITE_MSR(address, canon) \
case MSR_ ## address: \
+ if ( !(canon) ) \
+ { \
+ HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of star msr write\n"); \
+ vmx_inject_hw_exception(v, TRAP_gp_fault, 0); \
+ return 0; \
+ } \
guest_msr_state->msrs[VMX_INDEX_MSR_ ## address] = msr_content; \
if ( !test_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags) )\
set_bit(VMX_INDEX_MSR_ ## address, &guest_msr_state->flags); \
@@ -109,7 +115,6 @@ static void vmx_save_host_msrs(void)
set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags); \
break

-#define IS_CANO_ADDRESS(add) 1
static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
{
u64 msr_content = 0;
@@ -124,19 +129,31 @@ static inline int long_mode_do_msr_read(

case MSR_FS_BASE:
if ( !(vmx_long_mode_enabled(v)) )
- goto exit_and_crash;
+ {
+ vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+ return 0;
+ }

msr_content = __vmread(GUEST_FS_BASE);
break;

case MSR_GS_BASE:
if ( !(vmx_long_mode_enabled(v)) )
- goto exit_and_crash;
+ {
+ vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+ return 0;
+ }

msr_content = __vmread(GUEST_GS_BASE);
break;

case MSR_SHADOW_GS_BASE:
+ if ( !(vmx_long_mode_enabled(v)) )
+ {
+ vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+ return 0;
+ }
+
msr_content = guest_msr_state->shadow_gs;
break;

@@ -155,24 +172,20 @@ static inline int long_mode_do_msr_read(
regs->edx = (u32)(msr_content >> 32);

return 1;
-
- exit_and_crash:
- gdprintk(XENLOG_ERR, "Fatal error reading MSR %lx\n", (long)regs->ecx);
- domain_crash(v->domain);
- return 1; /* handled */
}

static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
{
u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+ u32 ecx = regs->ecx;
struct vcpu *v = current;
struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);

HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%x msr_content 0x%"PRIx64"\n",
- (u32)regs->ecx, msr_content);
+ ecx, msr_content);

- switch ( (u32)regs->ecx ) {
+ switch ( ecx ) {
case MSR_EFER:
/* offending reserved bit will cause #GP */
if ( msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE) )
@@ -209,46 +222,42 @@ static inline int long_mode_do_msr_write

case MSR_FS_BASE:
case MSR_GS_BASE:
+ case MSR_SHADOW_GS_BASE:
if ( !vmx_long_mode_enabled(v) )
- goto exit_and_crash;
+ {
+ vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+ return 0;
+ }

if ( !IS_CANO_ADDRESS(msr_content) )
{
- HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
+ HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of base msr write\n");
vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
return 0;
}

- if ( regs->ecx == MSR_FS_BASE )
+ if ( ecx == MSR_FS_BASE )
__vmwrite(GUEST_FS_BASE, msr_content);
- else
+ else if ( ecx == MSR_GS_BASE )
__vmwrite(GUEST_GS_BASE, msr_content);
+ else
+ {
+ v->arch.hvm_vmx.msr_state.shadow_gs = msr_content;
+ wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
+ }

break;

- case MSR_SHADOW_GS_BASE:
- if ( !(vmx_long_mode_enabled(v)) )
- goto exit_and_crash;
-
- v->arch.hvm_vmx.msr_state.shadow_gs = msr_content;
- wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
- break;
-
- CASE_WRITE_MSR(STAR);
- CASE_WRITE_MSR(LSTAR);
- CASE_WRITE_MSR(CSTAR);
- CASE_WRITE_MSR(SYSCALL_MASK);
+ CASE_WRITE_MSR(STAR, 1);
+ CASE_WRITE_MSR(LSTAR, IS_CANO_ADDRESS(msr_content));
+ CASE_WRITE_MSR(CSTAR, IS_CANO_ADDRESS(msr_content));
+ CASE_WRITE_MSR(SYSCALL_MASK, 1);

default:
return 0;
}

return 1;
-
- exit_and_crash:
- gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
- domain_crash(v->domain);
- return 1; /* handled */
}

/*
@@ -1192,6 +1201,31 @@ static void vmx_io_instruction(unsigned
ASSERT(count);
}
}
+#ifdef __x86_64__
+ else
+ {
+ if ( !IS_CANO_ADDRESS(addr) || !IS_CANO_ADDRESS(addr + size - 1) )
+ {
+ vmx_inject_hw_exception(current, TRAP_gp_fault, 0);
+ return;
+ }
+ if ( count > (1UL << 48) / size )
+ count = (1UL << 48) / size;
+ if ( !(regs->eflags & EF_DF) )
+ {
+ if ( addr + count * size - 1 < addr ||
+ !IS_CANO_ADDRESS(addr + count * size - 1) )
+ count = (addr & ~((1UL << 48) - 1)) / size;
+ }
+ else
+ {
+ if ( (count - 1) * size > addr ||
+ !IS_CANO_ADDRESS(addr + (count - 1) * size) )
+ count = (addr & ~((1UL << 48) - 1)) / size + 1;
+ }
+ ASSERT(count);
+ }
+#endif

/*
* Handle string pio instructions that cross pages or that
Index: 2006-11-27/xen/include/asm-x86/hvm/io.h
===================================================================
--- 2006-11-27.orig/xen/include/asm-x86/hvm/io.h 2006-11-28 09:02:26.000000000 +0100
+++ 2006-11-27/xen/include/asm-x86/hvm/io.h 2006-11-29 15:47:30.000000000 +0100
@@ -25,6 +25,12 @@
#include <public/hvm/ioreq.h>
#include <public/event_channel.h>

+#ifdef __x86_64__
+#define IS_CANO_ADDRESS(add) (((long)(add) >> 47) == ((long)(add) >> 63))
+#else
+#define IS_CANO_ADDRESS(add) 1
+#endif
+
#define operand_size(operand) \
((operand >> 24) & 0xFF)



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Re: [PATCH] add canonical address checks to HVM [ In reply to ]
On 29/11/06 15:05, "Jan Beulich" <jbeulich@novell.com> wrote:

> Add proper long mode canonical address checks to PIO emulation and MSR
> writes, the former paralleling the limit checks added for 32-bit guests.
> Also catches two more cases in the MSR handling code where only ECX
> (rather than RCX) should be used.
>
> Signed-off-by: Jan Beulich <jbeulich@novell.com>

I wonder if we would be better consistently *removing* the canonical-address
checks? It's not a security issue after all -- the check is done in hardware
only to prevent code from ever depending on being able to use the high
address bits for software flags. I think it is harmless to deviate from
native behaviour on this issue and makes our emulation code smaller and
simpler.

-- Keir


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Re: [PATCH] add canonical address checks to HVM [ In reply to ]
>>> Keir Fraser <keir@xensource.com> 30.11.06 18:55 >>>
>On 29/11/06 15:05, "Jan Beulich" <jbeulich@novell.com> wrote:
>
>> Add proper long mode canonical address checks to PIO emulation and MSR
>> writes, the former paralleling the limit checks added for 32-bit guests.
>> Also catches two more cases in the MSR handling code where only ECX
>> (rather than RCX) should be used.
>>
>> Signed-off-by: Jan Beulich <jbeulich@novell.com>
>
>I wonder if we would be better consistently *removing* the canonical-address
>checks? It's not a security issue after all -- the check is done in hardware
>only to prevent code from ever depending on being able to use the high
>address bits for software flags. I think it is harmless to deviate from
>native behaviour on this issue and makes our emulation code smaller and
>simpler.

I think it might be a security issue:
- In MSR writes, are you certain there's not going to be any problem now or
in the future when the state gets actually loaded into CPU registers?
- In memory accesses, at least until no failures to read/write guest memory
are being ignored anymore.

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Re: [PATCH] add canonical address checks to HVM [ In reply to ]
On 1/12/06 8:05 am, "Jan Beulich" <jbeulich@novell.com> wrote:

> I think it might be a security issue:
> - In MSR writes, are you certain there's not going to be any problem now or
> in the future when the state gets actually loaded into CPU registers?
> - In memory accesses, at least until no failures to read/write guest memory
> are being ignored anymore.

We should be defensive about guest reads/writes/MSR-accesses anyway. I.e.,
we should at least accept faults on those accesses, and make sure the worst
that happens is a domain crash.

-- Keir


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Re: [PATCH] add canonical address checks to HVM [ In reply to ]
>>> Keir Fraser <keir@xensource.com> 01.12.06 09:07 >>>
>On 1/12/06 8:05 am, "Jan Beulich" <jbeulich@novell.com> wrote:
>
>> I think it might be a security issue:
>> - In MSR writes, are you certain there's not going to be any problem now or
>> in the future when the state gets actually loaded into CPU registers?
>> - In memory accesses, at least until no failures to read/write guest memory
>> are being ignored anymore.
>
>We should be defensive about guest reads/writes/MSR-accesses anyway. I.e.,
>we should at least accept faults on those accesses, and make sure the worst
>that happens is a domain crash.

That I take for granted, but it's far from optimal. I don't know about modern
Windows (it has been too long since I last looked at their handling of this),
but at least Linux takes precautions when doing potentially dangerous
accesses in so many places that it would seem unreasonable to crash a
domain when it could be passed a simple fault at the right point, and let it
decide for itself whether it wants to die.

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Re: [PATCH] add canonical address checks to HVM [ In reply to ]
On 1/12/06 08:20, "Jan Beulich" <jbeulich@novell.com> wrote:

>> We should be defensive about guest reads/writes/MSR-accesses anyway. I.e.,
>> we should at least accept faults on those accesses, and make sure the worst
>> that happens is a domain crash.
>
> That I take for granted, but it's far from optimal. I don't know about modern
> Windows (it has been too long since I last looked at their handling of
> this),
> but at least Linux takes precautions when doing potentially dangerous
> accesses in so many places that it would seem unreasonable to crash a
> domain when it could be passed a simple fault at the right point, and let it
> decide for itself whether it wants to die.

Linux would never be barking mad enough to poke a non-canonical address in
64-bit mode, I'm sure. :-) But yes, I will take a closer look at the
patch...

-- Keir


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Re: [PATCH] add canonical address checks to HVM [ In reply to ]
On 29/11/06 15:05, "Jan Beulich" <jbeulich@novell.com> wrote:

> +#ifdef __x86_64__
> +#define IS_CANO_ADDRESS(add) (((long)(add) >> 47) == ((long)(add) >> 63))
> +#else
> +#define IS_CANO_ADDRESS(add) 1
> +#endif
> +

Is there any guarantee that right-shift is signed when using gcc?

How about (int16_t)((add) >> 48) == -(int)(((add) >> 47) & 1) ??

-- Keir


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Re: [PATCH] add canonical address checks to HVM [ In reply to ]
>>> Keir Fraser <keir@xensource.com> 01.12.06 11:12 >>>
>On 29/11/06 15:05, "Jan Beulich" <jbeulich@novell.com> wrote:
>
>> +#ifdef __x86_64__
>> +#define IS_CANO_ADDRESS(add) (((long)(add) >> 47) == ((long)(add) >> 63))
>> +#else
>> +#define IS_CANO_ADDRESS(add) 1
>> +#endif
>> +
>
>Is there any guarantee that right-shift is signed when using gcc?

I suppose so; I believe this is assumed to be that way in various other places.
However, I'm not sure where one could look up implementation-defined
behavior for gcc.

>How about (int16_t)((add) >> 48) == -(int)(((add) >> 47) & 1) ??

Sure, should work too, but would incur more overhead. I was actually trying
to even avoid the two shifts, but I wasn't able to find something that would
use just one *and* would be faster than the version I submitted.

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Re: [PATCH] add canonical address checks to HVM [ In reply to ]
>> Is there any guarantee that right-shift is signed when using gcc?
>
> I suppose so; I believe this is assumed to be that way in various other
> places.
> However, I'm not sure where one could look up implementation-defined
> behavior for gcc.

Looking into it a bit I think a compiler has to be consistent (i.e., it is
implementation *defined*) and gcc makes reasonable effort to do signed
shifts on architectures that have ISA support for it. I may add a boot-time
BUG_ON() just as a sanity check. :-)

> Sure, should work too, but would incur more overhead. I was actually trying
> to even avoid the two shifts, but I wasn't able to find something that would
> use just one *and* would be faster than the version I submitted.

I don't think it's possible. Two shifts and a compare is pretty tight.

-- Keir

>
> Jan
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel